curvedns-curvedns-0.87/ChangeLog

2010-12-28 0.87
	- Sometimes CurveDNS closed sockfd 0.
	  -- Harm van Tilborg, suggested by Leo Vandewoestijne and Dan Bernstein.
	- Some small logging improvements.
	  -- Harm van Tilborg
	- Fixed sendto(2) in *BSD environment, was expecting exact address length,
	  instead of largest (struct sockaddr_in6) one.
	  -- Harm van Tilborg
	- Source IP when target is contacted can be specified in CURVEDNS_SOURCE_IP
	  -- Harm van Tilborg, suggested by Hauke Lampe.
	- configure.curvedns now also accepts ABI as first argument
	  -- Harm van Tilborg, suggested by Maciej Zenczykowski.

2010-10-23 0.86
	Initial release

curvedns-curvedns-0.87/INSTALL

	CurveDNS INSTALL
=====================================================

For the latest and detailed installation instructions see
http://curvedns.on2it.net/docs#install

For people in a hurry, a (very) short instruction:
1) Be sure to have libev (+ dev headers) installed
2) ./configure.nacl      -- takes a while
3) ./configure.curvedns  -- answer possible questions
4) make
5) Copy both curvedns and curvedns-keygen to your preferred path.
(A combined sketch of these steps appears after the LICENSE text below.)

curvedns-curvedns-0.87/LICENSE

Copyright 2010 CurveDNS Project. All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice, this list of
      conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright notice, this list
      of conditions and the following disclaimer in the documentation and/or other materials
      provided with the distribution.

THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of CurveDNS Project.
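For reference, the quick-install steps from INSTALL above amount to roughly the following shell session (a minimal sketch; the destination directory is only an example, any preferred path will do):

    # assumes libev and its development headers are already installed
    ./configure.nacl        # builds the bundled NaCl library, takes a while
    ./configure.curvedns    # answer the ABI/compiler questions, generates Makefile
    make
    # there is no automated "make install"; copy the binaries yourself:
    cp curvedns curvedns-keygen /usr/local/sbin/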
curvedns-curvedns-0.87/Makefile.in000066400000000000000000000041741150631715100172030ustar00rootroot00000000000000# CurveDNS' Makefile template # # The @VAR@ tokens will be replaced by configure.curvedns ABI=@ABI@ NACLLIB=nacl/build/lib/$(ABI) NACLINC=nacl/build/include/$(ABI) CDNSCFLAGS=-Wall -fno-strict-aliasing -O3 -I$(NACLINC) # If you have libev at a non-standard place, specify that here: #EV= #EVCFLAGS=-I$(EV)/include #EVLDFLAGS=-L$(EV)/lib CC=@CC@ CFLAGS=@CFLAGS@ $(CDNSCFLAGS) $(EVCFLAGS) LDFLAGS=-L$(NACLLIB) $(EVLDFLAGS) # do not edit below EXTRALIB=-lev TARGETS=curvedns-keygen curvedns .PHONY: targets clean distclean install targets: $(TARGETS) clean: rm -f *.a *.o $(TARGETS) distclean: clean rm -f Makefile install: @echo Sorry, no automated install. Copy the following binaries to your preferred destination path: @echo " $(TARGETS)" debug.o: debug.c debug.h $(CC) $(CFLAGS) -c debug.c cache_hashtable.o: cache_hashtable.c cache_hashtable.h debug.o $(CC) $(CFLAGS) -c cache_hashtable.c # ready for possible critbit addition cache.a: cache_hashtable.o $(AR) cr cache.a cache_hashtable.o ranlib cache.a dns.o: dns.c dns.h debug.o event.a $(CC) $(CFLAGS) -c dns.c dnscurve.o: dnscurve.c dnscurve.h debug.o event.a $(CC) $(CFLAGS) -c dnscurve.c curvedns.o: curvedns.c curvedns.h debug.o ip.o misc.o $(CC) $(CFLAGS) -c curvedns.c ip.o: ip.c ip.h debug.o $(CC) $(CFLAGS) -c ip.c event_tcp.o: event_tcp.c event.h debug.o ip.o cache.a $(CC) $(CFLAGS) -c event_tcp.c event_udp.o: event_udp.c event.h debug.o ip.o cache.a $(CC) $(CFLAGS) -c event_udp.c event_main.o: event_main.c event.h debug.o ip.o cache.a $(CC) $(CFLAGS) -c event_main.c event.a: event_main.o event_udp.o event_tcp.o $(AR) cr event.a event_main.o event_udp.o event_tcp.o ranlib event.a misc.o: misc.c misc.h ip.o debug.o $(CC) $(CFLAGS) -c misc.c curvedns-keygen.o: curvedns-keygen.c $(CC) $(CFLAGS) -c curvedns-keygen.c # The targets: curvedns: debug.o ip.o misc.o cache.a event.a dnscurve.o dns.o curvedns.o $(CC) $(LDFLAGS) debug.o ip.o misc.o dnscurve.o dns.o cache.a event.a curvedns.o $(EXTRALIB) -lnacl -o curvedns curvedns-keygen: curvedns-keygen.o debug.o ip.o misc.o $(CC) $(LDFLAGS) curvedns-keygen.o debug.o ip.o misc.o -lnacl -o curvedns-keygen curvedns-curvedns-0.87/README000066400000000000000000000003231150631715100160060ustar00rootroot00000000000000 CurveDNS README =================================================== For installation and documentation related matters please refer to INSTALL. License information can be found in LICENSE. curvedns-curvedns-0.87/VERSION000066400000000000000000000000051150631715100161730ustar00rootroot000000000000000.87 curvedns-curvedns-0.87/cache_hashtable.c000066400000000000000000000144401150631715100203550ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #include "cache_hashtable.h" struct cache_table *dnscurve_cache = NULL; static unsigned int cache_hash(uint8_t *key) { unsigned int hash = 5381; uint8_t i; // djb's hash function for (i = 0; i < CACHE_KEY_SIZE; i++) hash = ((hash << 5) + hash) + key[i]; return hash; } void cache_stats(struct cache_table *table) { struct cache_entry *entry; int i, j; if (!table) return; if (debug_level >= DEBUG_DEBUG) { debug_log(DEBUG_DEBUG, "cache_stats(): usage: %d/%d, ", table->nrused, table->nrentries); for (i = 0; i < table->nrbuckets; i++) { j = 0; entry = table->buckets[i]; while (entry) { j++; entry = entry->next; } debug_log(DEBUG_DEBUG, "[%d]: %d, ", i, j); } debug_log(DEBUG_DEBUG, "\n"); } } struct cache_table *cache_init(int nrbuckets, int nrentries) { struct cache_table *table = NULL; int i; if ((nrbuckets < 1) || (nrentries < 1) || (nrentries < nrbuckets)) goto wrong; table = (struct cache_table *) malloc(sizeof(struct cache_table)); if (!table) goto wrong; memset(table, 0, sizeof(struct cache_table)); table->buckets = (struct cache_entry **) malloc(nrbuckets * sizeof(struct cache_entry *)); if (!table->buckets) goto wrong; memset(table->buckets, 0, nrbuckets * sizeof(struct cache_entry *)); table->entries = (struct cache_entry *) malloc(nrentries * sizeof(struct cache_entry)); if (!table->entries) goto wrong; memset(table->entries, 0, nrentries * sizeof(struct cache_entry)); // XXX: cache, don't do this array-based table->headunused = table->entries; i = 0; while (i < (nrentries - 1)) { table->headunused[i].nexttable = &table->headunused[i+1]; i++; } table->nrbuckets = nrbuckets; table->nrentries = nrentries; table->nrused = 0; table->headused = NULL; table->lastused = NULL; debug_log(DEBUG_INFO, "cache_init(): allocated %zd bytes in total for the shared secret cache structure\n", (nrbuckets * sizeof(struct cache_entry *)) + (nrentries * sizeof(struct cache_entry)) + sizeof(struct cache_table)); return table; wrong: debug_log(DEBUG_ERROR, "cache_init(): something went wrong while initializing\n"); cache_destroy(table); return NULL; } struct cache_entry *cache_get(struct cache_table *table, uint8_t *key) { struct cache_entry *entry = NULL; unsigned int hash; if (!table || !key || !table->nrused) goto wrong; hash = (cache_hash(key) % table->nrbuckets); entry = table->buckets[hash]; while (entry) { if (memcmp(entry->key, key, CACHE_KEY_SIZE) == 0) return entry; entry = entry->next; } wrong: return NULL; } struct cache_entry *cache_set(struct cache_table *table, uint8_t *key, uint8_t *value) { 
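	/*
	 * cache_set() stores the mapping from a client public key to its computed
	 * shared secret.  If every entry is already in use, the oldest entry (the
	 * head of the "used" list) is evicted and recycled; otherwise a fresh
	 * entry is taken from the "unused" list.  The entry is then filled with
	 * the key/value pair, appended to the tail of the "used" list, and linked
	 * into the bucket chosen by cache_hash(key).
	 */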
struct cache_entry *entry = NULL; unsigned int hash; if (!table || !key || !value) goto wrong; cache_stats(table); if (table->nrused >= table->nrentries) { debug_log(DEBUG_DEBUG, "cache_set(): hashtable full - forcing oldest one out\n"); if (table->headunused) { debug_log(DEBUG_ERROR, "cache_set(): unused headunused != NULL\n"); goto wrong; } entry = table->headused; table->headused = table->headused->nexttable; if (entry->prev) entry->prev->next = NULL; else { hash = (cache_hash(entry->key) % table->nrbuckets); table->buckets[hash] = NULL; } } else { table->nrused++; entry = table->headunused; table->headunused = table->headunused->nexttable; if (!table->headused) table->headused = entry; } entry->prev = NULL; entry->next = NULL; entry->nexttable = NULL; memcpy(entry->key, key, CACHE_KEY_SIZE); memcpy(entry->value, value, CACHE_VALUE_SIZE); if (table->lastused) table->lastused->nexttable = entry; table->lastused = entry; hash = (cache_hash(entry->key) % table->nrbuckets); if (table->buckets[hash]) { table->buckets[hash]->prev = entry; entry->next = table->buckets[hash]; } table->buckets[hash] = entry; return entry; wrong: return NULL; } int cache_empty(struct cache_table *table) { struct cache_entry *entry = NULL, *tmpentry = NULL; int i; if (!table) goto wrong; entry = table->headused; while (entry) { tmpentry = entry->nexttable; entry->nexttable = table->headunused; entry->next = NULL; entry->prev = NULL; table->headunused = entry; memset(entry->key, 0, CACHE_KEY_SIZE); memset(entry->value, 0, CACHE_VALUE_SIZE); entry = tmpentry; } for (i = 0; i < table->nrbuckets; i++) table->buckets[i] = NULL; table->headused = NULL; table->lastused = NULL; table->nrused = 0; return 1; wrong: return 0; } int cache_destroy(struct cache_table *table) { if (table) { if (table->entries) free(table->entries); free(table); table->headused = NULL; table->headunused = NULL; table->lastused = NULL; table->nrbuckets = -1; table->nrentries = -1; table->nrused = -1; } return 1; } curvedns-curvedns-0.87/cache_hashtable.h000066400000000000000000000054651150631715100203710ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #ifndef CACHE_HASHTABLE_H_ #define CACHE_HASHTABLE_H_ #include #include #include #include #include #include "crypto_box_curve25519xsalsa20poly1305.h" #include "debug.h" // The cache mechanism isn't really general, it is focused // on the public key client -> shared key fetching. #define CACHE_KEY_SIZE crypto_box_curve25519xsalsa20poly1305_PUBLICKEYBYTES #define CACHE_VALUE_SIZE crypto_box_curve25519xsalsa20poly1305_BEFORENMBYTES struct cache_table { struct cache_entry **buckets; struct cache_entry *entries; struct cache_entry *headunused; struct cache_entry *headused, *lastused; int nrbuckets, nrentries, nrused; }; struct cache_entry { struct cache_entry *next, *prev, *nexttable; uint8_t key[CACHE_KEY_SIZE]; uint8_t value[CACHE_VALUE_SIZE]; }; extern struct cache_table *dnscurve_cache; extern void cache_stats(struct cache_table *); extern struct cache_table *cache_init(int, int); extern struct cache_entry *cache_get(struct cache_table *, uint8_t *); extern struct cache_entry *cache_set(struct cache_table *, uint8_t *, uint8_t *); extern int cache_empty(struct cache_table *); extern int cache_destroy(struct cache_table *); #endif /* CACHE_H_ */ curvedns-curvedns-0.87/configure.curvedns000077500000000000000000000054021150631715100206700ustar00rootroot00000000000000#!/usr/bin/env bash # XXX: make it POSIX-shell compatible (select isn't) start="`pwd`" top="`pwd`/nacl/build" bin="$top/bin" lib="$top/lib" include="$top/include" work="$top/work" curve="$work/curvedns" PATH="/usr/local/bin:$PATH" PATH="/usr/sfw/bin:$PATH" PATH="$bin:$PATH" export PATH # ------------------- funcs doMakefile() { sed -e "s/@ABI@/$abi/g" \ -e "s/@CC@/$compiler/" \ -e "s/@CFLAGS@/$cflags/" \ Makefile.in > Makefile } # ------------------- NaCl stuff if [ ! -d "$bin" ] || [ ! -d "$work" ]; then echo "No NaCl build that is ready for deployment yet, did you run 'configure.nacl'?" exit 1 fi if [ -z "$1" ] && ([ -f "$curve"/abi ] || [ -f "$curve"/compiler ] || [ -f "$curve"/cflags ]); then echo "You have already picked an ABI, compiler, or -options for CurveDNS:" [ -f "$curve"/abi ] && ( echo -n "ABI: "; cat "$curve"/abi ) [ -f "$curve"/compiler ] && ( echo -n "Compiler: "; cat "$curve"/compiler ) [ -f "$curve"/cflags ] && ( echo -n "Compiler options: "; cat "$curve"/cflags ) echo echo "If you want to change this, remove the following directory and rerun:" echo "$curve" if [ -f "$curve"/abi ] && [ -f "$curve"/compiler ] && [ -f "$curve"/cflags ]; then echo echo "Nevertheless, a new Makefile has been generated." abi=`cat "$curve"/abi` compiler=`cat "$curve"/compiler` cflags=`cat "$curve"/cflags` doMakefile exit 0 fi exit 1 fi rm -rf "$curve" mkdir "$curve" cd "$bin" # Saving intermediate result in a file, because subprocesses (okabi opens a pipe) kill local vars: okabi \ | while read abi do echo "$abi" > "$curve"/lastabi done abis=`okabi | wc -l` lastabi=`cat "$curve"/lastabi` rm -f "$curve"/* if [ "$abis" -gt 1 ] && [ -z "$1" ]; then echo "There are more than one application binary interfaces generated for your " echo "system, which one do you want to use for CurveDNS?" select abi in $(okabi); do if [ -n "$abi" ]; then break; fi done echo $abi > "$curve"/abi elif [ "$abis" -gt 1 ] && [ ! 
-z "$1" ]; then echo "$1" > "$curve"/abi else echo "$lastabi" > "$curve"/abi fi abi=`cat "$curve"/abi` if [ ! -f "okc-$abi" ]; then echo "Did not find any suitable compiler for this ABI, does the '$abi' ABI exist?" rm -rf "$curve" exit 1 fi compiler=`okc-$abi | tail -1 | cut -d' ' -f1` cflags=`okc-$abi | tail -1 | cut -d' ' -f2-` echo "$compiler" > "$curve"/compiler echo "$cflags" > "$curve"/cflags # --------------------- Regular configure stuff cd "$start" doMakefile echo "Finished configuring CurveDNS, ready for compiling." echo -n "Chosen/picked ABI: " cat "$curve"/abi echo -n "Chosen/picked compiler: " cat "$curve"/compiler echo -n "Chosen/picked compiler options: " cat "$curve"/cflags echo echo "We are now ready to compile, run 'make' to do so." curvedns-curvedns-0.87/configure.nacl000077500000000000000000000262101150631715100177540ustar00rootroot00000000000000#!/bin/sh # nacl/do # D. J. Bernstein # Edited by H.A. van Tilborg for release with CurveDNS # Public domain. project=nacl version=`cat "$project"/version` shorthostname=`hostname | sed 's/\..*//' | tr -cd '[a-z][A-Z][0-9]'` top="`pwd`/$project/build" bin="$top/bin" lib="$top/lib" include="$top/include" work="$top/work" PATH="/usr/local/bin:$PATH" PATH="/usr/sfw/bin:$PATH" PATH="$bin:$PATH" export PATH LD_LIBRARY_PATH="/usr/local/lib/sparcv9:/usr/local/lib:$LD_LIBRARY_PATH" LD_LIBRARY_PATH="/usr/sfw/lib/sparcv9:/usr/sfw/lib:$LD_LIBRARY_PATH" export LD_LIBRARY_PATH # and wacky MacOS X DYLD_LIBRARY_PATH="/usr/local/lib/sparcv9:/usr/local/lib:$DYLD_LIBRARY_PATH" DYLD_LIBRARY_PATH="/usr/sfw/lib/sparcv9:/usr/sfw/lib:$DYLD_LIBRARY_PATH" export DYLD_LIBRARY_PATH # and work around bug in GNU sort LANG=C export LANG rm -rf "$top" cd "$project" mkdir -p "$top" mkdir -p "$bin" mkdir -p "$lib" mkdir -p "$include" exec >"$top/log" exec 2>&1 exec 5>"$top/data" exec "$work/${project}_base.c" okc-$abi \ | while read compiler do ( cd "$work" && $compiler -c ${project}_base.c ) && break done okar-$abi cr "$lib/$abi/lib${project}.a" "$work/${project}_base.o" ( ranlib "$lib/$abi/lib${project}.a" || exit 0 ) done # loop over operations cat OPERATIONS \ | while read o do [ -d "$o" ] || continue selected='' [ -f "$o/selected" ] && selected=`cat "$o/selected"` # for each operation, loop over primitives ls "$o" \ | sort \ | while read p do [ -d "$o/$p" ] || continue expectedchecksum='' [ -f "$o/$p/checksum" ] && expectedchecksum=`cat "$o/$p/checksum"` op="${o}_${p}" startdate=`date +%Y%m%d` # for each operation primitive, loop over abis okabi \ | while read abi do echo "=== `date` === $abi $o/$p" libs=`"oklibs-$abi"` libs="$lib/$abi/cpucycles.o $libs" [ -f "$lib/$abi/lib${project}.a" ] && libs="$lib/$abi/lib${project}.a $libs" rm -rf "$work" mkdir -p "$work" mkdir -p "$work/best" # for each operation primitive abi, loop over implementations find "$o/$p" -follow -name "api.h" \ | sort \ | while read doth do implementationdir=`dirname $doth` opi=`echo "$implementationdir" | tr ./- ___` echo "=== `date` === $abi $implementationdir" rm -rf "$work/compile" mkdir -p "$work/compile" cfiles=`ls "$implementationdir" | grep '\.c$' || :` sfiles=`ls "$implementationdir" | grep '\.[sS]$' || :` cppfiles=`ls "$o" | grep '\.cpp$' || :` cp -p "$o"/*.c "$work/compile/" cp -p "$o"/*.cpp "$work/compile/" cp -pr "$implementationdir"/* "$work/compile" cp -p "try-anything.c" "$work/compile/try-anything.c" cp -p "measure-anything.c" "$work/compile/measure-anything.c" cp -p MACROS "$work/compile/MACROS" cp -p PROTOTYPES.c "$work/compile/PROTOTYPES.c" cp -p PROTOTYPES.cpp 
"$work/compile/PROTOTYPES.cpp" ( cd "$work/compile" ( echo "#ifndef ${o}_H" echo "#define ${o}_H" echo "" echo "#include \"${op}.h\"" echo "" egrep "${o}"'$|'"${o}"'\(|'"${o}"'_' < MACROS \ | sed "s/$o/$op/" | while read mop do echo "#define ${mop} ${mop}" | sed "s/$op/$o/" done echo "#define ${o}_PRIMITIVE \"${p}\"" echo "#define ${o}_IMPLEMENTATION ${op}_IMPLEMENTATION" echo "#define ${o}_VERSION ${op}_VERSION" echo "" echo "#endif" ) > "$o.h" ( echo "#ifndef ${op}_H" echo "#define ${op}_H" echo "" sed 's/[ ]CRYPTO_/ '"${opi}"'_/g' < api.h echo '#ifdef __cplusplus' echo '#include ' egrep "${o}"'$|'"${o}"'\(|'"${o}"'_' < PROTOTYPES.cpp \ | sed "s/$o/$opi/" echo 'extern "C" {' echo '#endif' egrep "${o}"'$|'"${o}"'\(|'"${o}"'_' < PROTOTYPES.c \ | sed "s/$o/$opi/" echo '#ifdef __cplusplus' echo '}' echo '#endif' echo "" egrep "${o}"'$|'"${o}"'\(|'"${o}"'_' < MACROS \ | sed "s/$o/$opi/" | while read mopi do echo "#define ${mopi} ${mopi}" | sed "s/$opi/$op/" done echo "#define ${op}_IMPLEMENTATION \"${implementationdir}\"" echo "#ifndef ${opi}_VERSION" echo "#define ${opi}_VERSION \"-\"" echo "#endif" echo "#define ${op}_VERSION ${opi}_VERSION" echo "" echo "#endif" ) > "$op.h" okc-$abi \ | while read compiler do echo "=== `date` === $abi $implementationdir $compiler" compilerword=`echo "$compiler" | tr ' ' '_'` ok=1 for f in $cfiles $sfiles do if [ "$ok" = 1 ] then $compiler \ -I. -I"$include" -I"$include/$abi" \ -c "$f" >../errors 2>&1 || ok=0 ( if [ `wc -l < ../errors` -lt 25 ] then cat ../errors else head ../errors echo ... tail ../errors fi ) \ | while read err do echo "$version $shorthostname $abi $startdate $o $p fromcompiler $implementationdir $compilerword $f $err" >&5 done fi done [ "$ok" = 1 ] || continue okar-$abi cr "$op.a" *.o || continue ranlib "$op.a" $compiler \ -I. -I"$include" -I"$include/$abi" \ -o try try.c try-anything.c \ "$op.a" $libs >../errors 2>&1 || ok=0 cat ../errors \ | while read err do echo "$version $shorthostname $abi $startdate $o $p fromcompiler $implementationdir $compilerword try.c $err" >&5 done [ "$ok" = 1 ] || continue if sh -c './try || exit $?' >../outputs 2>../errors then checksum=`awk '{print $1}' < ../outputs` cycles=`awk '{print $2}' < ../outputs` checksumcycles=`awk '{print $3}' < ../outputs` cyclespersecond=`awk '{print $4}' < ../outputs` impl=`awk '{print $5}' < ../outputs` else echo "$version $shorthostname $abi $startdate $o $p tryfails $implementationdir $compilerword error $?" >&5 cat ../outputs ../errors \ | while read err do echo "$version $shorthostname $abi $startdate $o $p tryfails $implementationdir $compilerword $err" >&5 done continue fi checksumok=fails [ "x$expectedchecksum" = "x$checksum" ] && checksumok=ok [ "x$expectedchecksum" = "x" ] && checksumok=unknown echo "$version $shorthostname $abi $startdate $o $p try $checksum $checksumok $cycles $checksumcycles $cyclespersecond $impl $compilerword" >&5 [ "$checksumok" = fails ] && continue [ -s ../bestmedian ] && [ `cat ../bestmedian` -le $cycles ] && continue echo "$cycles" > ../bestmedian $compiler -D'COMPILER="'"$compiler"'"' \ -DLOOPS=1 \ -I. -I"$include" -I"$include/$abi" \ -o measure measure.c measure-anything.c \ "$op.a" $libs >../errors 2>&1 || ok=0 cat ../errors \ | while read err do echo "$version $shorthostname $abi $startdate $o $p fromcompiler $implementationdir $compilerword measure.c $err" >&5 done [ "$ok" = 1 ] || continue for f in $cppfiles do okcpp-$abi \ | while read cppcompiler do echo "=== `date` === $abi $implementationdir $cppcompiler" $cppcompiler \ -I. 
-I"$include" -I"$include/$abi" \ -c "$f" && break done done rm -f ../best/*.o ../best/measure || continue for f in *.o do cp -p "$f" "../best/${opi}-$f" done cp -p "$op.h" "../$op.h" cp -p "$o.h" "../$o.h" cp -p measure ../best/measure done ) done echo "=== `date` === $abi $o/$p measuring" "$work/best/measure" \ | while read measurement do echo "$version $shorthostname $abi $startdate $o $p $measurement" >&5 done [ -f "$o/$p/used" ] \ && okar-$abi cr "$lib/$abi/lib${project}.a" "$work/best"/*.o \ && ( ranlib "$lib/$abi/lib${project}.a" || exit 0 ) \ && cp -p "$work/$op.h" "$include/$abi/$op.h" \ && [ -f "$o/$p/selected" ] \ && cp -p "$work/$o.h" "$include/$abi/$o.h" \ || : done done done for language in c cpp do for bintype in commandline tests do ls $bintype \ | sed -n 's/\.'$language'$//p' \ | sort \ | while read cmd do echo "=== `date` === starting $bintype/$cmd" rm -rf "$work" mkdir -p "$work/compile" cp "$bintype/$cmd.$language" "$work/compile/$cmd.$language" [ "$bintype" = tests ] && cp -p "$bintype/$cmd.out" "$work/compile/$cmd.out" okabi \ | while read abi do [ -x "$bin/$cmd" ] && break libs=`"oklibs-$abi"` libs="$lib/$abi/cpucycles.o $libs" libs="$libs $lib/$abi/randombytes.o" ok${language}-$abi \ | while read compiler do [ -x "$bin/$cmd" ] && break echo "=== `date` === $bintype/$cmd $abi $compiler" ( cd "$work/compile" if $compiler \ -I"$include" -I"$include/$abi" \ -o "$cmd" "$cmd.${language}" \ "$lib/$abi/lib${project}.a" $libs then case "$bintype" in commandline) cp -p "$cmd" "$bin/$cmd" ;; tests) "./$cmd" | cmp - "$cmd.out" || "./$cmd" ;; esac fi ) done done done done done echo "=== `date` === finishing" curvedns-curvedns-0.87/contrib/000077500000000000000000000000001150631715100165705ustar00rootroot00000000000000curvedns-curvedns-0.87/contrib/curvedns-log-run000077500000000000000000000001221150631715100217230ustar00rootroot00000000000000#!/bin/sh USER="curvedns" exec setuidgid "$USER" multilog t n25 s1048576 ./main curvedns-curvedns-0.87/contrib/curvedns-run000077500000000000000000000005771150631715100211620ustar00rootroot00000000000000#!/bin/sh USER="curvedns" # IPs to listen on (separated by comma): LISTEN_IPS="0.0.0.0" # Port to listen on (root is needed if port < 1024): LISTEN_PORT="53" # Authoritative name server (target) IP: TARGET_IP="127.0.0.1" # Authoritative name server port: TARGET_PORT="53" exec envuidgid "$USER" envdir env curvedns "$LISTEN_IPS" "$LISTEN_PORT" "$TARGET_IP" "$TARGET_PORT" 2>&1 curvedns-curvedns-0.87/curvedns-keygen.c000066400000000000000000000115201150631715100204040ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #include #include #include #include #include #include #include "crypto_box_curve25519xsalsa20poly1305.h" #include "debug.h" #include "misc.h" extern int global_urandom_fd; char hexpublic[65], hexprivate[65]; uint8_t public[32], private[32], dnspublic[55]; // Implicitly called by crypto_box_keypair, urandom fd is file descriptor of /dev/urandom // Opening etc. is handled by misc_crypto_random_init() void randombytes(unsigned char *x, unsigned long long xlen) { int i; while (xlen > 0) { if (xlen < 1048576) i = xlen; else i = 1048576; i = read(global_urandom_fd, x, i); if (i < 1) { sleep(1); continue; } x += i; xlen -= i; } } int curvedns_env(char *path, char *name) { char fullname[256], fullpath[256]; FILE *f; struct stat st; if (strlen(name) > 200) { fprintf(stderr, "Authoritative name server name too long.\n"); return 1; } if (snprintf(fullname, sizeof(fullname), "%s.%s", dnspublic, name) < 0) return 1; if (snprintf(fullpath, sizeof(fullpath), "%s/env", path) < 0) return 1; if (stat(fullpath, &st) < 0) { if (errno != ENOENT) return 1; mkdir(fullpath, 0700); } else { if (!S_ISDIR(st.st_mode)) { fprintf(stderr, "%s is not a directory, manually remove this first\n", fullpath); return 1; } } if (snprintf(fullpath, sizeof(fullpath), "%s/env/CURVEDNS_PRIVATE_KEY", path) < 0) return 1; if (stat(fullpath, &st) == 0) { fprintf(stderr, "A private key file already exists, manually remove that first.\n"); return 1; } f = fopen(fullpath, "w"); if (!f) { fprintf(stderr, "Unable to open %s for writing.\n", fullpath); return 1; } fprintf(f, "%s\n", hexprivate); fclose(f); if (chmod(fullpath, 0400) != 0) return 1; printf("Authoritative name server name:\n%s\n", fullname); printf("DNS public key:\n%s\n", dnspublic); printf("Hex public key:\n%s\n", hexpublic); printf("Hex secret key:\n%s\n", hexprivate); printf("\n"); printf("The private key was written to %s, so it can be used inside the CurveDNS environment.\n", fullpath); return 0; } int main(int argc, char *argv[]) { unsigned dnspublic_len = sizeof(dnspublic) - 3; if (!misc_crypto_random_init()) { debug_log(DEBUG_FATAL, "unable to ensure randomness\n"); return 1; } // Generate the actual keypair: crypto_box_curve25519xsalsa20poly1305_keypair(public, private); // The DNSCurve (base32)-encoding of the PUBLIC key: memcpy(dnspublic, "uz5", 3); if (!misc_base32_encode(dnspublic + 3, &dnspublic_len, public, 32)) { perror("base32_encode"); return 1; } // The hex encoding of the PUBLIC key: if (!misc_hex_encode(public, 32, hexpublic, 64)) { perror("hex_encode"); return 1; } // The hex encoding of the PRIVATE key: if (!misc_hex_encode(private, 32, hexprivate, 64)) { perror("hex_encode"); return 1; } dnspublic[54] = 0; hexpublic[64] = 0; hexprivate[64] = 0; if (argc == 1) { printf("DNS 
public key:\t%s\n", dnspublic); printf("Hex public key:\t%s\n", hexpublic); printf("Hex secret key:\t%s\n", hexprivate); } else if (argc != 3) { fprintf(stderr, "Usage: %s \n", argv[0]); return 1; } else { return curvedns_env(argv[1], argv[2]); } return 0; } curvedns-curvedns-0.87/curvedns.c000066400000000000000000000202271150631715100171300ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. 
* */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #include /* for AF_UNSPEC */ #include "curvedns.h" #include "misc.h" #include "ip.h" #include "event.h" #include "dnscurve.h" // The server's private key uint8_t global_secret_key[32]; // Number of shared secret caches: int global_shared_secrets = 5000; static anysin_t *local_addresses; static int local_addresses_count; static int usage(const char *argv0) { debug_log(DEBUG_FATAL, "Usage: %s \n\n", argv0); debug_log(DEBUG_FATAL, "Environment options (between []'s are optional):\n"); debug_log(DEBUG_FATAL, " CURVEDNS_PRIVATE_KEY\n\tThe hexidecimal representation of the server's private (secret) key\n"); debug_log(DEBUG_FATAL, " UID\n\tNon-root user id to run under\n"); debug_log(DEBUG_FATAL, " GID\n\tNon-root user group id to run under\n"); debug_log(DEBUG_FATAL, " [CURVEDNS_SOURCE_IP]\n\tThe IP to bind on when target server is contacted (default: [none])\n"); debug_log(DEBUG_FATAL, " [CURVEDNS_INTERNAL_TIMEOUT]\n\tNumber of seconds to declare target server timeout (default: 1.2)\n"); debug_log(DEBUG_FATAL, " [CURVEDNS_UDP_TRIES]\n\tWhen timeout to target server, how many tries in total (default: 2)\n"); debug_log(DEBUG_FATAL, " [CURVEDNS_TCP_NUMBER]\n\tNumber of simultaneous TCP connections allowed (default: 25)\n"); debug_log(DEBUG_FATAL, " [CURVEDNS_TCP_TIMEOUT]\n\tNumber of seconds before TCP session to client times out (default: 60.0)\n"); debug_log(DEBUG_FATAL, " [CURVEDNS_SHARED_SECRETS]\n\tNumber of shared secrets that can be cached (default: 5000)\n"); debug_log(DEBUG_FATAL, " [CURVEDNS_DEBUG]\n\tDebug level, 1: fatal, 2: error, 3: warning, 4: info, 5: debug (default: 2)\n"); return 1; } static int getenvoptions() { int tmpi; double tmpd; char ip[INET6_ADDRSTRLEN]; global_source_address.sa.sa_family = AF_UNSPEC; tmpi = misc_getenv_ip("CURVEDNS_SOURCE_IP", 0, &global_source_address); if (tmpi < 0) { debug_log(DEBUG_FATAL, "$CURVEDNS_SOURCE_IP is not a correct IP address\n"); return 0; } else if (tmpi) { if (global_target_address.sa.sa_family != global_source_address.sa.sa_family) { debug_log(DEBUG_FATAL, "IP address of $CURVEDNS_SOURCE_IP is not in the same family (IPv4/IPv6) as the target address\n"); return 0; } if (!ip_address_string(&global_source_address, ip, sizeof(ip))) return 0; debug_log(DEBUG_FATAL, "source IP address: %s\n", ip); } else { debug_log(DEBUG_INFO, "source IP address: [none]\n"); } if (misc_getenv_double("CURVEDNS_INTERNAL_TIMEOUT", 0, &tmpd)) { if (tmpd > 60.) tmpd = 60.; else if (tmpd < 0.01) tmpd = 0.01; global_ip_internal_timeout = (ev_tstamp) tmpd; debug_log(DEBUG_FATAL, "internal timeout set to %.2f seconds\n", global_ip_internal_timeout); } else { debug_log(DEBUG_INFO, "internal timeout: %.2f seconds\n", global_ip_internal_timeout); } if (misc_getenv_int("CURVEDNS_UDP_TRIES", 0, &tmpi)) { if (tmpi > 50) tmpi = 50; else if (tmpi < 1) tmpi = 1; global_ip_udp_retries = tmpi; debug_log(DEBUG_FATAL, "UDP retries set to %d time(s)\n", global_ip_udp_retries); } else { debug_log(DEBUG_INFO, "UDP retries: %d time(s)\n", global_ip_udp_retries); } if (misc_getenv_int("CURVEDNS_TCP_NUMBER", 0, &tmpi)) { if (tmpi > 500) tmpi = 500; else if (tmpi < 1) tmpi = 1; global_ip_tcp_max_number_connections = tmpi; debug_log(DEBUG_FATAL, "number of simultaneous TCP connections set to %d\n", global_ip_tcp_max_number_connections); } else { debug_log(DEBUG_INFO, "number of simultaneous TCP connections: %d\n", global_ip_tcp_max_number_connections); } if (misc_getenv_double("CURVEDNS_TCP_TIMEOUT", 0, &tmpd)) { if (tmpd > 86400.) 
tmpd = 86400.; else if (tmpd < 1.0) tmpd = 1.0; global_ip_tcp_external_timeout = (ev_tstamp) tmpd; debug_log(DEBUG_FATAL, "TCP client timeout set to %.2f seconds\n", global_ip_tcp_external_timeout); } else { debug_log(DEBUG_INFO, "TCP client timeout: %.2f seconds\n", global_ip_tcp_external_timeout); } if (misc_getenv_int("CURVEDNS_SHARED_SECRETS", 0, &tmpi)) { if (tmpi > 50) global_shared_secrets = tmpi; debug_log(DEBUG_FATAL, "shared secret cached set to %d positions\n", global_shared_secrets); } else { debug_log(DEBUG_INFO, "shared secret cache: %d positions\n", global_shared_secrets); } return 1; } int main(int argc, char *argv[]) { int uid, gid, tmp; if (argc != 5) return usage(argv[0]); // First determine debug level: if (misc_getenv_int("CURVEDNS_DEBUG", 0, &tmp)) { if ((tmp > 0) && (tmp < 6)) debug_level = tmp; } debug_log(DEBUG_FATAL, "starting %s version %s (debug level %d)\n", argv[0], CURVEDNS_VERSION, debug_level); // Parse the listening IP addresses: local_addresses = ip_multiple_parse(&local_addresses_count, argv[1], argv[2]); if (!local_addresses) { debug_log(DEBUG_FATAL, "listening IPs or port malformed\n"); return 1; } // Parse target IP: if (!ip_parse(&global_target_address, argv[3], argv[4])) return usage(argv[0]); // XXX: should be handled by ip_parse() :/? if (global_target_address.sa.sa_family == AF_INET) { global_target_address_len = sizeof(struct sockaddr_in); } else { global_target_address_len = sizeof(struct sockaddr_in6); } // Open urandom for randomness during run: if (!misc_crypto_random_init()) { debug_log(DEBUG_FATAL, "unable to open /dev/urandom for randomness\n"); return 1; } // Fetch the secret key from environment and setup: if (!misc_getenv_key("CURVEDNS_PRIVATE_KEY", 1, global_secret_key)) return 1; // Fetch group id: if (!misc_getenv_int("GID", 1, &gid)) return 1; // Fetch user id: if (!misc_getenv_int("UID", 1, &uid)) return 1; // Open UDP and TCP sockets on local address(es): if (!ip_init(local_addresses, local_addresses_count)) { debug_log(DEBUG_FATAL, "ip_init(): failed, are you root?\n"); return 1; } // Do exactly this ;] debug_log(DEBUG_INFO, "main(): throwing away root privileges\n"); if (setgid(gid) != 0) { debug_log(DEBUG_FATAL, "main(): unable to set gid\n"); return 1; } if (setuid(uid) != 0) { debug_log(DEBUG_FATAL, "main(): unable to set uid\n"); return 1; } // Fetch all optional options from the environment: if (!getenvoptions()) return 1; // Initialize the event handler, the core of CurveDNS: if (!event_init()) { debug_log(DEBUG_FATAL, "event_init(): failed\n"); return 1; } // Initialize the DNSCurve part (such as the shared secret cache): if (!dnscurve_init()) { debug_log(DEBUG_FATAL, "dnscurve_init(): failed\n"); return 1; } // Start the event worker: event_worker(); // Should only be reached when loop is destroyed (at SIGINT and SIGTERM): return 0; } curvedns-curvedns-0.87/curvedns.h000066400000000000000000000037351150631715100171420ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #ifndef CURVEDNS_H_ #define CURVEDNS_H_ #define CURVEDNS_VERSION "0.87" #include #include #include #include #include #include #include #include "debug.h" extern uint8_t global_secret_key[32]; extern int global_urandom_fd; extern int global_shared_secrets; #endif /* CURVEDNS_H_ */ curvedns-curvedns-0.87/debug.c000066400000000000000000000035471150631715100163730ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #include "debug.h" // Standard error level: ERROR int debug_level = DEBUG_ERROR; void debug_log(int level, char *format, ...) { if (level <= debug_level) { va_list args; va_start(args, format); vfprintf(stderr, format, args); va_end(args); } } curvedns-curvedns-0.87/debug.h000066400000000000000000000036761150631715100164030ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #ifndef DEBUG_H_ #define DEBUG_H_ #include #include #define DEBUG_DEBUG 5 #define DEBUG_INFO 4 #define DEBUG_WARN 3 #define DEBUG_ERROR 2 #define DEBUG_FATAL 1 extern void debug_log(int, char *, ...); extern int debug_level; #define debug_fatal(x, ...) debug_log(DEBUG_FATAL, (x), __VA_ARGS__) #endif /* DEBUG_H_ */ curvedns-curvedns-0.87/dns.c000066400000000000000000000251501150631715100160630ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. 
* */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #include "dns.h" #include "misc.h" #include "dnscurve.h" // From Matthew Demspky's prototype code // (Indirectly from djbdns' dns_packet.c) unsigned int dns_packet_getname(uint8_t *name, unsigned int namemax, const uint8_t *buf, unsigned int len, unsigned int pos) { unsigned int loop = 0; unsigned int state = 0; unsigned int firstcompress = 0; unsigned int where; uint8_t ch; unsigned int namelen = 0; for (;;) { if (pos >= len) goto PROTO; ch = buf[pos++]; if (++loop >= 4096) goto PROTO; if (state) { if (namelen + 1 > namemax) goto PROTO; name[namelen++] = ch; --state; } else { while (ch >= 192) { where = ch; where -= 192; where <<= 8; if (pos >= len) goto PROTO; ch = buf[pos++]; if (!firstcompress) firstcompress = pos; pos = where + ch; if (pos >= len) goto PROTO; ch = buf[pos++]; if (++loop >= 4096) goto PROTO; } if (ch >= 64) goto PROTO; if (namelen + 1 > namemax) goto PROTO; name[namelen++] = ch; if (!ch) break; state = ch; } } if (firstcompress) return firstcompress; return pos; PROTO: errno = EPROTO; return 0; } int dns_analyze_query(event_entry_t *general_entry) { // There is nothing to analyze in regular DNS, so go directly to DNSCurve analyzement: return dnscurve_analyze_query(general_entry); } int dns_forward_query_udp(event_entry_t *general_entry) { int sock, n; struct event_udp_entry *entry = &general_entry->udp; if (!ip_udp_open(&sock, &global_target_address)) { debug_log(DEBUG_ERROR, "dns_forward_query_udp(): unable to open a UDP socket to forward query to authoritative server\n"); goto wrong; } // randomize the outgoing source port and set the source IP address, if needed if (!ip_bind_random(sock)) { // if this fails, let the kernel handle it (would mean source IP address is not guaranteed...) debug_log(DEBUG_WARN, "dns_forward_query_udp(): unable to bind to source IP address and/or random port\n"); } entry->state = EVENT_UDP_INT_WRITING; entry->read_int_watcher.data = general_entry; entry->timeout_int_watcher.data = general_entry; entry->retries++; // Now generate a new TXID to forecome any poisoning: entry->buffer[0] = misc_crypto_random(256); entry->buffer[1] = misc_crypto_random(256); entry->dns.dsttxid = (entry->buffer[0] << 8) + entry->buffer[1]; ev_io_init(&entry->read_int_watcher, event_udp_int_cb, sock, EV_READ); ev_timer_init(&entry->timeout_int_watcher, event_udp_timeout_cb, 0., global_ip_internal_timeout); ev_io_start(event_default_loop, &entry->read_int_watcher); ev_timer_again(event_default_loop, &entry->timeout_int_watcher); debug_log(DEBUG_INFO, "dns_forward_query_udp(): forwarding query to authoritative name server (prev id = %d, new id = %d)\n", (entry->dns.type == DNS_DNSCURVE_STREAMLINED || entry->dns.type == DNS_NON_DNSCURVE) ? 
entry->dns.srctxid : entry->dns.srcinsidetxid, entry->dns.dsttxid); n = sendto(sock, entry->buffer, entry->packetsize, MSG_DONTWAIT, (struct sockaddr *) &global_target_address.sa, global_target_address_len); if (n == -1) { debug_log(DEBUG_ERROR, "dns_forward_query_udp(): unable to forward the query to authoritative name server (%s)\n", strerror(errno)); goto wrong; } return 1; wrong: return 0; } int dns_forward_query_tcp(event_entry_t *general_entry) { struct event_tcp_entry *entry = &general_entry->tcp; if (!ip_tcp_open(&entry->intsock, &global_target_address)) { debug_log(DEBUG_ERROR, "dns_forward_query_tcp(): unable to open TCP socket\n"); goto wrong; } // randomizing port is not really necessary, as TCP is invulnerable to cache poisoning // however, the source IP address is set in ip_bind_random... if (!ip_bind_random(entry->intsock)) { // if this fails, let the kernel handle it (would mean source IP address is not guaranteed...) debug_log(DEBUG_WARN, "dns_forward_query_tcp(): unable to bind to source IP address and/or random port\n"); } if (!ip_connect(entry->intsock, &global_target_address)) { debug_log(DEBUG_ERROR, "dns_forward_query_tcp(): unable to connect to authoritative name server (%s)\n", strerror(errno)); goto wrong; } // Now generate a new TXID to forecome any poisoning: entry->buffer[0] = misc_crypto_random(256); entry->buffer[1] = misc_crypto_random(256); // XXX: do this platform safe (i.e. ntoh) entry->dns.dsttxid = (entry->buffer[0] << 8) + entry->buffer[1]; debug_log(DEBUG_INFO, "dns_forward_query_tcp(): forwarding query to authoritative name server (prev id = %d, new id = %d)\n", entry->dns.srctxid, entry->dns.dsttxid); return 1; wrong: return 0; } int dns_analyze_reply_query(event_entry_t *general_entry) { struct event_general_entry *entry = &general_entry->general; uint16_t recvtxid; if (entry->packetsize < 12) { debug_log(DEBUG_INFO, "dns_analyze_reply_query(): received response is too small (no DNS header)\n"); goto wrong; } // Check the received id with the one we sent: recvtxid = (entry->buffer[0] << 8) + entry->buffer[1]; if (entry->dns.dsttxid != recvtxid) { debug_log(DEBUG_WARN, "dns_analyze_reply_query(): received txid differ!\n"); goto wrong; } // Now set the right response id, depending on the query type: if ((entry->dns.type == DNS_NON_DNSCURVE) || (entry->dns.type == DNS_DNSCURVE_STREAMLINED)) { entry->buffer[0] = entry->dns.srctxid >> 8; entry->buffer[1] = entry->dns.srctxid & 0xff; } else { entry->buffer[0] = entry->dns.srcinsidetxid >> 8; entry->buffer[1] = entry->dns.srcinsidetxid & 0xff; } return 1; wrong: return 0; } int dns_reply_query_udp(event_entry_t *general_entry) { struct event_udp_entry *entry = &general_entry->udp; socklen_t addresslen; int n; if (entry->dns.type == DNS_NON_DNSCURVE) { debug_log(DEBUG_INFO, "dns_reply_query_udp(): sending DNS response in regular format\n"); } else if (entry->dns.type == DNS_DNSCURVE_STREAMLINED) { debug_log(DEBUG_INFO, "dns_reply_query_udp(): sending DNS response in streamlined DNSCurve format\n"); if (!dnscurve_reply_streamlined_query(general_entry)) { debug_log(DEBUG_WARN, "dns_reply_query_udp(): failed to reply in streamlined format\n"); goto wrong; } } else if ((entry->dns.type == DNS_DNSCURVE_TXT_RD_SET) || (entry->dns.type == DNS_DNSCURVE_TXT_RD_UNSET)) { debug_log(DEBUG_INFO, "dns_reply_query_udp(): sending DNS response in DNSCurve TXT format\n"); if (!dnscurve_reply_txt_query(general_entry)) { debug_log(DEBUG_WARN, "dns_reply_query_udp(): failed to reply in TXT format\n"); goto wrong; } } 
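	/*
	 * The response buffer now carries the answer in the same format the
	 * client used (regular DNS, streamlined DNSCurve, or DNSCurve TXT), so
	 * it can be sent back to the client over the listening socket the query
	 * came in on.
	 */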
entry->state = EVENT_UDP_EXT_WRITING; if (entry->address.sa.sa_family == AF_INET) { addresslen = sizeof(struct sockaddr_in); } else { addresslen = sizeof(struct sockaddr_in6); } n = sendto(entry->sock->fd, entry->buffer, entry->packetsize, MSG_DONTWAIT, (struct sockaddr *) &entry->address.sa, addresslen); if (n == -1) { debug_log(DEBUG_ERROR, "dns_reply_query_udp(): unable to send the response to the client (%s)\n", strerror(errno)); goto wrong; } return 1; wrong: return 0; } /* int dns_reply_nxdomain_query_udp(event_entry_t *general_entry) { struct event_udp_entry *entry = &general_entry->udp; socklen_t addresslen = sizeof(CHECK THIS); // XXX: make NXDOMAIN reply with DNSCurve if (entry->dns.type == DNS_NON_DNSCURVE) { entry->state = EVENT_UDP_EXT_WRITING; entry->buffer[0] = entry->dns.srctxid >> 8; entry->buffer[1] = entry->dns.srctxid & 0xff; entry->buffer[2] |= 0x80; entry->buffer[3] = (entry->buffer[3] & 0xf0) | 3; sendto(entry->sock->fd, entry->buffer, entry->packetsize, MSG_DONTWAIT, (struct sockaddr *) &entry->address.sa, addresslen); } else if (entry->dns.type == DNS_DNSCURVE_STREAMLINED) { debug_log(DEBUG_INFO, "dns_reply_nxdomain_query_udp(): doing NXDOMAIN reply in streamlined format - not implemented yet\n"); // XXX: to fix! } else if ((entry->dns.type == DNS_DNSCURVE_TXT_RD_SET) || (entry->dns.type == DNS_DNSCURVE_TXT_RD_UNSET)) { debug_log(DEBUG_INFO, "dns_reply_nxdomain_query_udp(): doing NXDOMAIN reply in TXT format - not implemented yet\n"); // XXX: to fix! } else { debug_log(DEBUG_ERROR, "dns_reply_nxdomain_query_udp(): DNS type was unknown (%d)\n", entry->dns.type); goto wrong; } return 1; wrong: return 0; } */ int dns_reply_query_tcp(event_entry_t *general_entry) { struct event_tcp_entry *entry = &general_entry->tcp; if (entry->dns.type == DNS_NON_DNSCURVE) { debug_log(DEBUG_INFO, "dns_reply_query_tcp(): sending DNS response in regular format\n"); } else if (entry->dns.type == DNS_DNSCURVE_STREAMLINED) { debug_log(DEBUG_INFO, "dns_reply_query_tcp(): sending DNS response in streamlined DNSCurve format\n"); if (!dnscurve_reply_streamlined_query(general_entry)) { debug_log(DEBUG_INFO, "dns_reply_query_tcp(): failed to reply in streamlined format\n"); goto wrong; } } else if ((entry->dns.type == DNS_DNSCURVE_TXT_RD_SET) || (entry->dns.type == DNS_DNSCURVE_TXT_RD_UNSET)) { debug_log(DEBUG_INFO, "dns_reply_query_tcp(): doing reply in TXT format\n"); if (!dnscurve_reply_txt_query(general_entry)) { debug_log(DEBUG_INFO, "dns_reply_query_tcp(): failed to reply in TXT format\n"); goto wrong; } } return 1; wrong: return 0; } curvedns-curvedns-0.87/dns.h000066400000000000000000000043511150631715100160700ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #ifndef DNS_H_ #define DNS_H_ #include #include #include #include #include "debug.h" #include "event.h" extern unsigned int dns_packet_getname(uint8_t *, unsigned int, const uint8_t *, unsigned int, unsigned int); extern int dns_analyze_query(event_entry_t *); extern int dns_analyze_reply_query(event_entry_t *); extern int dns_forward_query_udp(event_entry_t *); extern int dns_forward_query_tcp(event_entry_t *); extern int dns_reply_query_udp(event_entry_t *); extern int dns_reply_nxdomain_query_udp(event_entry_t *); extern int dns_reply_query_tcp(event_entry_t *); #endif /* DNS_H_ */ curvedns-curvedns-0.87/dnscurve.c000066400000000000000000000425461150631715100171400ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. 
* */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #include "dnscurve.h" #include "misc.h" #include "curvedns.h" #include "dns.h" #include "cache_hashtable.h" // return values: // -1 -> unable to generate shared secret // 1 -> plugged from packet info // 2 -> plugged from cache // 3 -> generated, and plugged in cache static int dnscurve_get_shared_secret(struct dns_packet_t *packet) { struct cache_entry *cache_entry = NULL; uint8_t sharedsecret[32]; if (!packet) goto wrong; if (packet->ispublic) { cache_entry = cache_get(dnscurve_cache, (uint8_t *) packet->publicsharedkey); if (cache_entry) { memcpy(packet->publicsharedkey, cache_entry->value, 32); packet->ispublic = 0; debug_log(DEBUG_INFO, "dnscurve_get_shared_secret(): shared secret plugged from the cache\n"); return 2; } else { memset(sharedsecret, 0, sizeof(sharedsecret)); if (crypto_box_curve25519xsalsa20poly1305_beforenm(sharedsecret, packet->publicsharedkey, global_secret_key) == -1) goto wrong; cache_entry = cache_set(dnscurve_cache, packet->publicsharedkey, sharedsecret); if (!cache_entry) goto wrong; memcpy(packet->publicsharedkey, sharedsecret, 32); packet->ispublic = 0; debug_log(DEBUG_INFO, "dnscurve_get_shared_secret(): generated a shared secret and added to cache\n"); return 3; } } debug_log(DEBUG_INFO, "dnscurve_get_shared_secret(): shared secret already available in packet structure\n"); return 1; wrong: return -1; } int dnscurve_init() { int slots; slots = (int) (global_shared_secrets / 25); if (slots < 5) { slots = 5; } else if (slots > 500) { slots = 500; } dnscurve_cache = cache_init(slots, global_shared_secrets); debug_log(DEBUG_INFO, "dnscurve_init(): able to store %d shared secrets, spread between %d buckets\n", global_shared_secrets, slots); if (!dnscurve_cache) { debug_log(DEBUG_ERROR, "dnscurve_init(): unable to initiate cache structure\n"); goto wrong; } return 1; wrong: return 0; } /* To Matthew Dempsky */ static int dnscurve_parse_query_name(uint8_t *box, unsigned int *boxlen, uint8_t *publickey, unsigned int *zone, const uint8_t *name) { uint8_t encoded_box[4096]; unsigned int encoded_boxlen = 0; unsigned int i = 0; errno = EPROTO; // Concatenate the base32 encoded components which make up the nonce and box for (;;) { const uint8_t component_len = name[i]; if (component_len == 54) break; else if (component_len > 50) return 0; else if (component_len == 0) return 0; if ((encoded_boxlen + component_len) > sizeof(encoded_box)) goto NAMETOOLONG; memcpy(encoded_box + encoded_boxlen, name + i + 1, component_len); encoded_boxlen += component_len; i += component_len + 1; } // Base32 decode the box if (!misc_base32_decode(box, boxlen, encoded_box, encoded_boxlen, 0)) return 0; // Next is the public key, where the first three bytes are 'x1a' (case insensitive): if (name[i] != 54 || (name[i+1] & ~0x20) != 'X' || name[i+2] != '1' || (name[i+3] & ~0x20) != 'A') return 0; unsigned int publickeylen = 32; if (!misc_base32_decode(publickey, &publickeylen, name + i + 4, 51, 1)) return 0; if (publickeylen != 32) return 0; i += 54 + 1; *zone = i; errno = 0; return 1; NAMETOOLONG: errno = ENAMETOOLONG; return 0; } int dnscurve_analyze_query(event_entry_t *general_entry) { uint8_t sandbox[4096], fullnonce[24], queryname[4096]; int result; unsigned int sandboxlen = sizeof(sandbox), pos; struct event_general_entry *entry = &general_entry->general; struct dns_packet_t *packet = &entry->dns; if (entry->packetsize < 12) { debug_log(DEBUG_WARN, "dnscurve_analyze_query(): packet too small (no DNS header)\n"); goto wrong; } packet->type = 
DNS_NON_DNSCURVE; packet->srctxid = (entry->buffer[0] << 8) + entry->buffer[1]; // Both the streamlined and TXT format at least need 68 bytes: if (entry->packetsize < 68) { // Could be a regular: debug_log(DEBUG_DEBUG, "dnscurve_analyze_query(): query too small to be DNSCurve packet, assuming regular DNS packet\n"); return 1; } memset(sandbox, 0, 16); memset(fullnonce + 12, 0, 12); if (!memcmp(entry->buffer, "Q6fnvWj8", 8)) { packet->ispublic = 1; memcpy(packet->publicsharedkey, entry->buffer + 8, 32); memcpy(fullnonce, entry->buffer + 40, 12); memcpy(sandbox + 16, entry->buffer + 52, entry->packetsize - 52); sandboxlen = entry->packetsize - 36; // 36 = 52 - 16 bytes at front result = dnscurve_get_shared_secret(packet); if ((result < 0) || packet->ispublic) { debug_log(DEBUG_INFO, "dnscurve_analyze_query(): DNSCurve streamlined query unable to get shared secret (code = %d)\n", result); return 1; } if (debug_level >= DEBUG_DEBUG) { char tmp[65]; misc_hex_encode(packet->publicsharedkey, 32, tmp, 64); tmp[64] = '\0'; debug_log(DEBUG_DEBUG, "dnscurve_analyze_query(): DNSCurve shared secret: '%s'\n", tmp); } if (crypto_box_curve25519xsalsa20poly1305_open_afternm( sandbox, sandbox, sandboxlen, fullnonce, packet->publicsharedkey) == -1) { debug_log(DEBUG_WARN, "dnscurve_analyze_query(): DNSCurve streamlined query unable to open cryptobox\n"); return 1; } memcpy(packet->nonce, fullnonce, 12); memcpy(entry->buffer, sandbox + 32, sandboxlen - 32); entry->packetsize = sandboxlen - 32; packet->type = DNS_DNSCURVE_STREAMLINED; packet->srctxid = (entry->buffer[0] << 8) + entry->buffer[1]; debug_log(DEBUG_INFO, "dnscurve_analyze_query(): DNSCurve streamlined query received (packetsize = %zd)\n", entry->packetsize); return 1; } // Must be query, no op code, not authoritative and no truncation bit // set. In all other cases, we do not handle _any_ of such queries: if (entry->buffer[2] & 0xfe) { debug_log(DEBUG_ERROR, "dnscurve_analyze_query(): not a query\n"); goto wrong; } // In order to be DNSCurve TXT format, recursion available flag should be // unset, zero bits be zero, response code also has to be zero. Furthermore // one question, and no other number of RRs should be sent along. If this is // all the case we continue, else it could be a regular non-DNSCurve request: if (memcmp(entry->buffer + 3, "\x00" "\x00\x01" "\x00\x00" "\x00\x00" "\x00\x00", 9)) return 1; // Now load in the query name (this is always directly behind the DNS header): pos = dns_packet_getname(queryname, sizeof(queryname), entry->buffer, entry->packetsize, 12); if (!pos) return 1; // If there is no space for the two 16-bit TYPE and CLASS ids, bail out: if (entry->packetsize - pos != 4) return 1; // Check if we are dealing with a IN TXT query: if (memcmp(entry->buffer + pos, "\x00\x10" "\x00\x01", 4)) { debug_log(DEBUG_DEBUG, "dnscurve_analyze_query(): no DNSCurve TXT (not a TXT query)\n"); return 1; } // Now we can finally parse the DNSCurve things inside the query name. unsigned int zone = 0; // First 12 base32 bytes of queryname are the nonce. 
For the open of the // cryptobox, align it four to the right, so that the BOXZERO bytes are // already there: sandboxlen -= 4; if (!dnscurve_parse_query_name(sandbox + 4, &sandboxlen, packet->publicsharedkey, &zone, queryname)) { debug_log(DEBUG_DEBUG, "dnscurve_analyze_query(): no DNSCurve TXT (no client public key found in query name)\n"); return 1; } packet->ispublic = 1; sandboxlen += 4; // The client nonce is located at sandbox[4..16], copy it for use in the opening of the box: memcpy(fullnonce, sandbox + 4, 12); memset(fullnonce + 12, 0, 12); // The BOXZERO offset: memset(sandbox, 0, 16); result = dnscurve_get_shared_secret(packet); if ((result < 0) || packet->ispublic) { debug_log(DEBUG_INFO, "dnscurve_analyze_query(): DNSCurve TXT query unable to get shared secret (code = %d)\n", result); return 1; } if (debug_level >= DEBUG_DEBUG) { char tmp[65]; misc_hex_encode(packet->publicsharedkey, 32, tmp, 64); tmp[64] = '\0'; debug_log(DEBUG_DEBUG, "dnscurve_analyze_query(): DNSCurve shared secret: '%s'\n", tmp); } if (crypto_box_curve25519xsalsa20poly1305_open_afternm( sandbox, sandbox, sandboxlen, fullnonce, packet->publicsharedkey) == -1) { debug_log(DEBUG_WARN, "dnscurve_analyze_query(): DNSCurve TXT query unable to open cryptobox\n"); return 1; } entry->packetsize = sandboxlen - 32; // If the inner packet is smaller than 12 bytes, it has no DNS header, so bail out: if (entry->packetsize < 2) { debug_log(DEBUG_ERROR, "dnscurve_analyze_query(): packet inside TXT format packet too small\n"); goto wrong; } // We are now sure we have received a DNSCurve TXT packet: if (entry->buffer[2] & 1) packet->type = DNS_DNSCURVE_TXT_RD_SET; else packet->type = DNS_DNSCURVE_TXT_RD_UNSET; // Allocate memory to store the query name: packet->qnamelen = pos - 12; packet->qname = (uint8_t *) malloc(packet->qnamelen * sizeof(uint8_t)); if (!packet->qname) { debug_log(DEBUG_ERROR, "dnscurve_analyze_query(): no memory for qname\n"); goto wrong; } memcpy(packet->qname, entry->buffer + 12, packet->qnamelen); // Now copy the plain text back to the buffer, and set the client nonce: memcpy(packet->nonce, fullnonce, 12); memcpy(entry->buffer, sandbox + 32, sandboxlen - 32); // Fetch the inner packet id: packet->srcinsidetxid = (entry->buffer[0] << 8) + entry->buffer[1]; debug_log(DEBUG_INFO, "dnscurve_analyze_query(): DNSCurve TXT query received (packetsize = %zd)\n", entry->packetsize); return 1; wrong: return 0; } int dnscurve_reply_streamlined_query(event_entry_t *general_entry) { struct event_general_entry *entry = &general_entry->general; struct dns_packet_t *packet = &entry->dns; uint8_t fullnonce[24], sandbox[4096]; ev_tstamp time; int result; size_t sandboxlen = sizeof(sandbox); if (packet->type != DNS_DNSCURVE_STREAMLINED) goto wrong; if (sandboxlen < entry->packetsize + 32) { debug_log(DEBUG_ERROR, "dnscurve_reply_streamlined_query(): sandboxlen (%zd) < entry->packetsize (%zd)\n", sandboxlen, entry->packetsize + 32); goto wrong; } // To apply the streamline format header, we need 32 bytes extra, let's // see if there is space for that: if (entry->packetsize + 32 > entry->bufferlen) { debug_log(DEBUG_ERROR, "dnscurve_reply_streamlined_query(): buffer is not big enough\n"); goto wrong; } // Copy the entire packet into the sandbox, clear however the first 32 bytes: memset(sandbox, 0, 32); memcpy(sandbox + 32, entry->buffer, entry->packetsize); // Set everything for the encryption step: memcpy(fullnonce, packet->nonce, 12); time = ev_now(event_default_loop); misc_crypto_nonce(fullnonce + 12, &time, 
sizeof(time)); result = dnscurve_get_shared_secret(packet); if ((result < 0) || packet->ispublic) { debug_log(DEBUG_ERROR, "dnscurve_reply_streamlined_query(): DNSCurve streamlined response unable to get shared secret (code = %d)\n", result); return 1; } if (debug_level >= DEBUG_DEBUG) { char tmp[65]; misc_hex_encode(packet->publicsharedkey, 32, tmp, 64); tmp[64] = '\0'; debug_log(DEBUG_DEBUG, "dnscurve_reply_streamlined_query(): DNSCurve shared secret: '%s'\n", tmp); } if (crypto_box_curve25519xsalsa20poly1305_afternm( sandbox, sandbox, entry->packetsize + 32, fullnonce, packet->publicsharedkey) != 0) { debug_log(DEBUG_ERROR, "dnscurve_reply_streamlined_query(): encryption failed\n"); goto wrong; } // And finally make the streamlined packet: memcpy(entry->buffer, "R6fnvWJ8", 8); memcpy(entry->buffer + 8, fullnonce, 24); memcpy(entry->buffer + 32, sandbox + 16, entry->packetsize + 16); entry->packetsize += 48; debug_log(DEBUG_INFO, "dnscurve_reply_streamlined_query(): done encryption, ready to send (%zd bytes)\n", entry->packetsize); return 1; wrong: return 0; } int dnscurve_reply_txt_query(event_entry_t *general_entry) { struct event_general_entry *entry = &general_entry->general; struct dns_packet_t *packet = &entry->dns; uint8_t fullnonce[24], sandbox[4096]; uint16_t tmpshort; ev_tstamp time; size_t sandboxlen = sizeof(sandbox), pos, rrdatalen; int result; if ((packet->type != DNS_DNSCURVE_TXT_RD_SET) && (packet->type != DNS_DNSCURVE_TXT_RD_UNSET)) goto wrong; if (sandboxlen < entry->packetsize + 32) { debug_log(DEBUG_ERROR, "dnscurve_reply_txt_query(): sandboxlen (%zd) < entry->packetsize (%zd)\n", sandboxlen, entry->packetsize + 32); goto wrong; } memcpy(&tmpshort, entry->buffer, 2); tmpshort = ntohs(tmpshort); if (tmpshort != packet->srcinsidetxid) { debug_log(DEBUG_ERROR, "dnscurve_reply_txt_query(): received inner txid differs!\n"); goto wrong; } // Copy the entire packet into the sandbox, clear however the first 32 bytes: memset(sandbox, 0, 32); memcpy(sandbox + 32, entry->buffer, entry->packetsize); // Now write the streamline header: memcpy(fullnonce, packet->nonce, 12); time = ev_now(event_default_loop); misc_crypto_nonce(fullnonce + 12, &time, sizeof(time)); result = dnscurve_get_shared_secret(packet); if ((result < 0) || packet->ispublic) { debug_log(DEBUG_ERROR, "dnscurve_reply_txt_query(): DNSCurve streamlined response unable to get shared secret\n"); return 1; } if (debug_level >= DEBUG_DEBUG) { char tmp[65]; misc_hex_encode(packet->publicsharedkey, 32, tmp, 64); tmp[64] = '\0'; debug_log(DEBUG_DEBUG, "dnscurve_reply_txt_query(): DNSCurve shared secret: '%s'\n", tmp); } if (crypto_box_curve25519xsalsa20poly1305_afternm( sandbox, sandbox, entry->packetsize + 32, fullnonce, packet->publicsharedkey) != 0) { debug_log(DEBUG_ERROR, "dnscurve_reply_txt_query(): encryption failed\n"); goto wrong; } // Now we have in sandbox[16..16+entry->packetsize-1] the encrypted packet. 
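// At this point sandbox[16..] holds the crypto box: a 16-byte authenticator
// followed by the encrypted DNS response.  The TXT answer built below carries,
// as its RDATA, the 12-byte server nonce followed by that box, chopped into
// DNS character-strings of at most 255 bytes, each prefixed by a one-byte
// length.  That is why the RDATA length computed below is
// payload + ceil(payload / 255); a 300-byte payload, for example, needs two
// character-strings and therefore 302 RDATA bytes.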
// Set the server nonce in sandbox[4..16-1]: memcpy(sandbox + 4, fullnonce + 12, 12); entry->packetsize += 16 + 12; // 16 = offset encryption, 12 = server nonce // Let's build a response TXT packet inside buffer: tmpshort = htons(packet->srctxid); memcpy(entry->buffer, &tmpshort, 2); if (packet->type == DNS_DNSCURVE_TXT_RD_SET) { memcpy(entry->buffer + 2, "\x85", 1); } else { memcpy(entry->buffer + 2, "\x84", 1); } memcpy(entry->buffer + 3, "\x00" "\x00\x01" "\x00\x01" "\x00\x00" "\x00\x00", 9); // Check if there is memory available: if (entry->bufferlen < (12 + (unsigned int) packet->qnamelen)) goto wrong; memcpy(entry->buffer + 12, packet->qname, packet->qnamelen); pos = 12 + packet->qnamelen; if (entry->bufferlen < pos + 14) goto wrong; memcpy(entry->buffer + pos, "\x00\x10" // question type: TXT "\x00\x01" // question class: IN "\xc0\x0c" // pointer to qname in question part "\x00\x10" // response RR type: TXT "\x00\x01" // response RR class: IN "\x00\x00\x00\x00" // response RR TTL: 0 , 14); pos += 14; // Now start the RDATA field, by first specifying the size, that includes all the size tokens: rrdatalen = entry->packetsize + ((entry->packetsize + 254) / 255); if (entry->bufferlen < pos + 2 + rrdatalen) { debug_log(DEBUG_ERROR, "dnscurve_reply_txt_query(): buffer too small (before doing rrdata split)\n"); goto wrong; } tmpshort = htons(rrdatalen); memcpy(entry->buffer + pos, &tmpshort, 2); pos += 2; // Start the split-up of RDATA in 255 byte parts (the server nonce + the crypto box): unsigned int todo = entry->packetsize, last = 4; // 4 is the offset of the sandbox uint8_t labelsize; while (todo) { labelsize = 255; if (todo < 255) labelsize = todo; *(entry->buffer + pos) = labelsize; // This fits, due to fact we checked this when RR data length was calculated: memcpy(entry->buffer + pos + 1, sandbox + last, labelsize); pos += labelsize + 1; last += labelsize; todo -= labelsize; } entry->packetsize = pos; debug_log(DEBUG_INFO, "dnscurve_reply_txt_query(): done encryption, ready to send (%zd bytes)\n", entry->packetsize); return 1; wrong: debug_log(DEBUG_ERROR, "dnscurve_reply_txt_query(): bailed out, probably due to memory errors\n"); return 0; } curvedns-curvedns-0.87/dnscurve.h000066400000000000000000000040531150631715100171340ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #ifndef DNSCURVE_H_ #define DNSCURVE_H_ #include #include #include #include #include #include "crypto_box_curve25519xsalsa20poly1305.h" #include "debug.h" #include "event.h" extern int dnscurve_init(); extern int dnscurve_analyze_query(event_entry_t *); extern int dnscurve_reply_streamlined_query(event_entry_t *); extern int dnscurve_reply_txt_query(event_entry_t *); #endif /* DNSCURVE_H_ */ curvedns-curvedns-0.87/event.h000066400000000000000000000112451150631715100164250ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. 
* */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #ifndef EVENT_H_ #define EVENT_H_ #include #include #include #include #include #include "ip.h" #include "cache_hashtable.h" typedef enum { EVENT_UDP_EXT_READING = 0, EVENT_UDP_EXT_WRITING, EVENT_UDP_INT_READING, EVENT_UDP_INT_WRITING, } event_udp_state_t; typedef enum { EVENT_TCP_EXT_READING_INIT = 0, EVENT_TCP_EXT_READING_MORE, EVENT_TCP_EXT_WRITING_INIT, EVENT_TCP_EXT_WRITING_MORE, EVENT_TCP_INT_READING_INIT, EVENT_TCP_INT_READING_MORE, EVENT_TCP_INT_WRITING_INIT, EVENT_TCP_INT_WRITING_MORE, } event_tcp_state_t; typedef enum { DNS_NON_DNSCURVE = 0, DNS_DNSCURVE_STREAMLINED, DNS_DNSCURVE_TXT_RD_UNSET, DNS_DNSCURVE_TXT_RD_SET, } dns_packet_type_t; struct dns_packet_t { dns_packet_type_t type; uint16_t srctxid; uint16_t dsttxid; // DNSCurve stuff: uint16_t srcinsidetxid; // the source id of the inner packet (when type == DNSCURVE_TXT_RD_*) uint8_t ispublic; // indicates whether publicsharedkey is public key (1) or not (0) uint8_t publicsharedkey[33]; // 32-byte public key OR shared key + 0-byte (needed for critbit uint8_t nonce[12]; uint16_t qnamelen; uint8_t *qname; }; struct event_general_entry { ip_protocol_t protocol; anysin_t address; uint8_t *buffer; size_t bufferlen; size_t packetsize; struct dns_packet_t dns; }; struct event_udp_entry { ip_protocol_t protocol; anysin_t address; uint8_t *buffer; size_t bufferlen; size_t packetsize; struct dns_packet_t dns; /* TILL HERE EVENT_UDP_ENTRY == EVENT_TCP_ENTRY == EVENT_GENERAL_ENTRY ALIGNED */ struct ip_socket_t *sock; uint8_t retries; ev_io read_int_watcher; ev_timer timeout_int_watcher; event_udp_state_t state; }; struct event_tcp_entry { ip_protocol_t protocol; anysin_t address; uint8_t *buffer; size_t bufferlen; size_t packetsize; struct dns_packet_t dns; /* TILL HERE EVENT_UDP_ENTRY == EVENT_TCP_ENTRY == EVENT_GENERAL_ENTRY ALIGNED */ size_t bufferat; int intsock; int extsock; ev_io write_watcher; ev_io read_watcher; ev_timer timeout_watcher; event_tcp_state_t state; }; typedef union { struct event_general_entry general; struct event_udp_entry udp; struct event_tcp_entry tcp; } event_entry_t; extern struct ev_loop *event_default_loop; extern int event_init(); extern void event_worker(); /* general stuff */ extern void event_cleanup_entry(struct ev_loop *, event_entry_t *); /* TCP stuff */ extern void event_tcp_startstop_watchers(struct ev_loop *, int); extern void event_cleanup_tcp_entry(struct ev_loop *, struct event_tcp_entry *); extern void event_tcp_accept_cb(struct ev_loop *, ev_io *, int); extern void event_tcp_read_cb(struct ev_loop *, ev_io *, int); extern void event_tcp_write_cb(struct ev_loop *, ev_io *, int); extern void event_tcp_timeout_cb(struct ev_loop *, ev_timer *, int); /* UDP stuff */ extern void event_cleanup_udp_entry(struct ev_loop *, struct event_udp_entry *); extern void event_udp_ext_cb(struct ev_loop *, ev_io *, int); extern void event_udp_int_cb(struct ev_loop *, ev_io *, int); extern void event_udp_timeout_cb(struct ev_loop *, ev_timer *, int); #endif /* EVENT_H_ */ curvedns-curvedns-0.87/event_main.c000066400000000000000000000136341150631715100174300ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #include "event.h" #include "cache_hashtable.h" struct ev_loop *event_default_loop = NULL; static struct ev_io *udp_watchers = NULL; static struct ev_io *tcp_watchers = NULL; static int watchers_count; /* as udp_watchers_count = tcp_watchers_count = watchers_count */ static struct ev_signal signal_watcher_hup; static struct ev_signal signal_watcher_int; static struct ev_signal signal_watcher_term; void event_cleanup_entry(struct ev_loop *loop, event_entry_t *entry) { struct event_general_entry *general_entry; if (entry) { general_entry = &entry->general; if (general_entry->dns.qname) { free(general_entry->dns.qname); general_entry->dns.qname = NULL; } if (general_entry->buffer) { free(general_entry->buffer); general_entry->buffer = NULL; } if (general_entry->protocol == IP_PROTOCOL_UDP) { event_cleanup_udp_entry(loop, &entry->udp); } else if (general_entry->protocol == IP_PROTOCOL_TCP) { event_cleanup_tcp_entry(loop, &entry->tcp); } } } // Starts the accept watchers if startstop = 1, stops them if startstop = 0 void event_tcp_startstop_watchers(struct ev_loop *loop, int startstop) { int i; for (i = 0; i < watchers_count; i++) { if (startstop) ev_io_start(loop, &tcp_watchers[i]); else ev_io_stop(loop, &tcp_watchers[i]); } } static void event_signal_cb(struct ev_loop *loop, ev_signal *w, int revent) { if (!(revent & EV_SIGNAL)) return; if (w->signum == SIGHUP) { debug_log(DEBUG_FATAL, "event_signal_cb(): received SIGHUP - clearing cache\n"); cache_stats(dnscurve_cache); cache_empty(dnscurve_cache); } else if ((w->signum == SIGINT) || (w->signum == SIGTERM)) { debug_log(DEBUG_FATAL, "event_signal_cb(): received %s - cleaning up nicely and quitting\n", (w->signum == SIGINT) ? 
"SIGINT" : "SIGTERM"); ev_unloop(EV_DEFAULT_ EVUNLOOP_ALL); cache_destroy(dnscurve_cache); ip_close(); } else { debug_log(DEBUG_WARN, "event_signal_cb(): received unhandled signal\n"); } } int event_init() { int i, j; // Fetch the default loop: event_default_loop = ev_default_loop(0); debug_log(DEBUG_DEBUG, "event_init(): memory size of event_entry_t: %zd\n", sizeof(event_entry_t)); debug_log(DEBUG_INFO, "event_init(): event backend in use: %d (1 = select, 2 = poll, 4 = epoll, 8 = kqueue, 16 = /dev/poll, 32 = port)\n", ev_backend(event_default_loop)); // Attaching signal handlers: ev_signal_init(&signal_watcher_hup, event_signal_cb, SIGHUP); ev_signal_init(&signal_watcher_int, event_signal_cb, SIGINT); ev_signal_init(&signal_watcher_term, event_signal_cb, SIGTERM); ev_signal_start(event_default_loop, &signal_watcher_hup); ev_signal_start(event_default_loop, &signal_watcher_int); ev_signal_start(event_default_loop, &signal_watcher_term); // Now allocate memory for each of the workers (global_sockets_count is always even): watchers_count = (int) (global_ip_sockets_count / 2); udp_watchers = (struct ev_io *) calloc(watchers_count, sizeof(struct ev_io)); if (!udp_watchers) goto wrong; tcp_watchers = (struct ev_io *) calloc(watchers_count, sizeof(struct ev_io)); if (!tcp_watchers) goto wrong; // Initialize watchers and connect them to the loop: char s[52]; for (i = 0, j = 0; i < global_ip_sockets_count; i++) { ip_address_total_string(global_ip_sockets[i].address, s, sizeof(s)); if (global_ip_sockets[i].protocol == IP_PROTOCOL_UDP) { // UDP socket debug_log(DEBUG_INFO, "event_init(): udp_watchers[%d] = UDP socket on %s (fd = %d)\n", j, s, global_ip_sockets[i].fd); udp_watchers[j].data = &global_ip_sockets[i]; ev_io_init(&udp_watchers[j], event_udp_ext_cb, global_ip_sockets[i].fd, EV_READ); ev_io_start(event_default_loop, &udp_watchers[j]); } else if (global_ip_sockets[i].protocol == IP_PROTOCOL_TCP) { // TCP socket debug_log(DEBUG_INFO, "event_init(): tcp_watchers[%d] = TCP socket on %s (fd = %d)\n", j, s, global_ip_sockets[i].fd); ev_io_init(&tcp_watchers[j], event_tcp_accept_cb, global_ip_sockets[i].fd, EV_READ); ev_io_start(event_default_loop, &tcp_watchers[j]); } if (i % 2) j++; } return 1; wrong: // Bails only out when there's something with memory, so nothing to do with sockets: if (udp_watchers) free(udp_watchers); if (tcp_watchers) free(tcp_watchers); return 0; } void event_worker() { debug_log(DEBUG_FATAL, "event_worker(): starting the event loop\n"); ev_loop(event_default_loop, 0); } curvedns-curvedns-0.87/event_tcp.c000066400000000000000000000367301150631715100172740ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #include "event.h" #include "dns.h" static int event_tcp_number_connections = 0; void event_cleanup_tcp_entry(struct ev_loop *loop, struct event_tcp_entry *entry) { if (entry) { if (ev_is_active(&entry->timeout_watcher)) ev_timer_stop(loop, &entry->timeout_watcher); if (ev_is_active(&entry->read_watcher)) ev_io_stop(loop, &entry->read_watcher); if (ev_is_active(&entry->write_watcher)) ev_io_stop(loop, &entry->write_watcher); if (entry->extsock >= 0) { ip_tcp_close(entry->extsock); entry->extsock = -1; } if (entry->intsock >= 0) { ip_tcp_close(entry->intsock); entry->intsock = -1; } if (event_tcp_number_connections-- == global_ip_tcp_max_number_connections) event_tcp_startstop_watchers(loop, 1); free(entry); } } void event_tcp_timeout_cb(struct ev_loop *loop, ev_timer *w, int revent) { event_entry_t *general_entry = (event_entry_t *) w->data; struct event_tcp_entry *entry = (struct event_tcp_entry *) &general_entry->tcp; uint8_t internal; if (!(revent & EV_TIMEOUT)) return; if ((entry->state == EVENT_TCP_INT_WRITING_INIT) || (entry->state == EVENT_TCP_INT_WRITING_MORE) || (entry->state == EVENT_TCP_INT_READING_INIT) || (entry->state == EVENT_TCP_INT_READING_MORE)) { internal = 1; } else if ((entry->state == EVENT_TCP_EXT_WRITING_INIT) || (entry->state == EVENT_TCP_EXT_WRITING_MORE) || (entry->state == EVENT_TCP_EXT_READING_INIT) || (entry->state == EVENT_TCP_EXT_READING_MORE)) { internal = 0; } else { debug_log(DEBUG_WARN, "event_tcp_timeout_cb(): not in the right state for TCP connection\n"); goto wrong; } if (internal) { if ((entry->state == EVENT_TCP_INT_READING_INIT) || (entry->state == EVENT_TCP_INT_READING_MORE)) { debug_log(DEBUG_INFO, "event_tcp_timeout_cb(): timeout while waiting for internal read\n"); } else if ((entry->state == EVENT_TCP_INT_WRITING_INIT) || (entry->state == EVENT_TCP_INT_WRITING_MORE)) { debug_log(DEBUG_INFO, "event_tcp_timeout_cb(): timeout while waiting for internal write\n"); } else { debug_log(DEBUG_WARN, "event_tcp_timeout_cb(): received unknown timeout while being notified for internal timeout\n"); } } else { if ((entry->state == EVENT_TCP_EXT_READING_INIT) || (entry->state == EVENT_TCP_EXT_READING_MORE)) { debug_log(DEBUG_INFO, "event_tcp_timeout_cb(): timeout while waiting for external read\n"); } else if ((entry->state == EVENT_TCP_EXT_WRITING_INIT) || (entry->state == EVENT_TCP_EXT_WRITING_MORE)) { debug_log(DEBUG_INFO, "event_tcp_timeout_cb(): timeout while waiting for external write\n"); } else { debug_log(DEBUG_WARN, "event_tcp_timeout_cb(): received unknown timeout while being notified for external timeout\n"); } } wrong: event_cleanup_entry(loop, general_entry); } void event_tcp_write_cb(struct ev_loop *loop, ev_io *w, int revent) { event_entry_t *general_entry = 
(event_entry_t *) w->data; struct event_tcp_entry *entry = (struct event_tcp_entry *) &general_entry->tcp; uint8_t *buffer, initial = 1, internal = 1, initbuf[2]; // houses the first two length bytes ssize_t bufferleft, buffersent; if (!(revent & EV_WRITE)) goto wrong; if (!entry) goto wrong; if (!entry->buffer) { debug_log(DEBUG_ERROR, "event_tcp_write_cb(): no buffer for TCP connection\n"); goto wrong; } if (entry->bufferat >= entry->bufferlen) { debug_log(DEBUG_ERROR, "event_tcp_write_cb(): buffer pointer after buffer space for TCP connection\n"); goto wrong; } if (entry->state == EVENT_TCP_INT_WRITING_INIT) { internal = 1; initial = 1; } else if (entry->state == EVENT_TCP_INT_WRITING_MORE) { internal = 1; initial = 0; } else if (entry->state == EVENT_TCP_EXT_WRITING_INIT) { internal = 0; initial = 1; } else if (entry->state == EVENT_TCP_EXT_WRITING_MORE) { internal = 0; initial = 0; } else goto wrong; debug_log(DEBUG_DEBUG, "event_tcp_write_cb(): received write event for %s TCP connection (bufferat = %zd, packetsize = %zd)\n", internal ? "internal" : "external", entry->bufferat, entry->packetsize); if (initial) { initbuf[0] = entry->packetsize >> 8; initbuf[1] = entry->packetsize & 0xff; buffer = initbuf + entry->bufferat; bufferleft = 2 - entry->bufferat; } else { buffer = entry->buffer + entry->bufferat; bufferleft = entry->packetsize - entry->bufferat; } buffersent = send(internal ? entry->intsock : entry->extsock, buffer, bufferleft, 0); if (buffersent == -1) { // As the socket is non-blocking, the kernel might not be ready, so stop and be notified again: if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) return; debug_log(DEBUG_WARN, "event_tcp_write_cb(): writing on %s TCP connection failed (%s)\n", internal ? "internal" : "external", strerror(errno)); goto wrong; } entry->bufferat += buffersent; if (initial) { if (entry->bufferat == 2) { entry->bufferat = 0; if (internal) entry->state = EVENT_TCP_INT_WRITING_MORE; else entry->state = EVENT_TCP_EXT_WRITING_MORE; } // Reset the timer: ev_timer_again(loop, &entry->timeout_watcher); return; } if (entry->bufferat < entry->packetsize) { debug_log(DEBUG_DEBUG, "event_tcp_write_cb(): buffer not full yet, so waiting for next batch\n"); ev_timer_again(loop, &entry->timeout_watcher); return; } // We are done sending, all the packets, clear everything up: if (internal) { debug_log(DEBUG_INFO, "event_tcp_write_cb(): we have sent the entire packet towards authoritative name server, packetsize = %zu\n", entry->packetsize); ev_io_stop(loop, &entry->write_watcher); // So we are ready for receiving, attach a watcher to the socket // for reading and reset the timeout (which is still okay, as we // are receiving from the authoritative server): entry->state = EVENT_TCP_INT_READING_INIT; entry->bufferat = 0; entry->packetsize = 0; ev_io_start(loop, &entry->read_watcher); ev_timer_again(loop, &entry->timeout_watcher); } else { debug_log(DEBUG_INFO, "event_tcp_write_cb(): we have sent the entire packet towards the client, packetsize = %zu, listening again\n", entry->packetsize); ev_io_stop(loop, &entry->write_watcher); // We are done sending info back to client. According to RFC, the client // should close the connection, so wait for a read: entry->state = EVENT_TCP_EXT_READING_INIT; entry->bufferat = 0; entry->packetsize = 0; ev_io_start(loop, &entry->read_watcher); ev_timer_again(loop, &entry->timeout_watcher); } return; wrong: debug_log(DEBUG_WARN, "event_tcp_write_cb(): catched wrong during %s TCP connection\n", internal ? 
"internal" : "external"); event_cleanup_entry(loop, general_entry); return; } void event_tcp_read_cb(struct ev_loop *loop, ev_io *w, int revent) { event_entry_t *general_entry = (event_entry_t *) w->data; struct event_tcp_entry *entry = (struct event_tcp_entry *) &general_entry->tcp; uint8_t *buffer, initial, internal; ssize_t bufferneeded, packetlen; if (!(revent & EV_READ)) goto wrong; if (!entry) goto wrong; if (!entry->buffer) goto wrong; if (entry->bufferat >= entry->bufferlen) goto wrong; if (entry->state == EVENT_TCP_INT_READING_INIT) { internal = 1; initial = 1; } else if (entry->state == EVENT_TCP_INT_READING_MORE) { internal = 1; initial = 0; } else if (entry->state == EVENT_TCP_EXT_READING_INIT) { internal = 0; initial = 1; } else if (entry->state == EVENT_TCP_EXT_READING_MORE) { internal = 0; initial = 0; } else goto wrong; buffer = entry->buffer + entry->bufferat; bufferneeded = (initial ? 2 : entry->packetsize) - entry->bufferat; packetlen = recv(internal ? entry->intsock : entry->extsock, buffer, bufferneeded, 0); if (packetlen < 1) { if (packetlen == -1) { // Our non-blocking socket, could not be ready, if so, wait to be notified another time: if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) return; debug_log(DEBUG_WARN, "event_tcp_read_cb(): failed to receive TCP data\n"); } else { debug_log(DEBUG_DEBUG, "event_tcp_read_cb(): EOF with recv() on TCP data (%s closed connection)\n", internal ? "authoritative server" : "client"); } goto wrong; } debug_log(DEBUG_INFO, "event_tcp_read_cb(): received response of %zd byte(s) (bufferat = %zd)\n", packetlen, entry->bufferat); entry->bufferat += packetlen; if (initial) { if (entry->bufferat == 2) { entry->packetsize = (entry->buffer[0] << 8) + entry->buffer[1]; if (entry->packetsize > entry->bufferlen) { debug_log(DEBUG_WARN, "event_tcp_read_cb(): about to receive a DNS TCP packet of %zu bytes, while we arranged a buffer of only %zu bytes\n", entry->packetsize, entry->bufferlen); goto wrong; } debug_log(DEBUG_INFO, "event_tcp_read_cb(): about to receive a DNS TCP packet of %zu bytes\n", entry->packetsize); entry->bufferat = 0; if (internal) entry->state = EVENT_TCP_INT_READING_MORE; else entry->state = EVENT_TCP_EXT_READING_MORE; } ev_timer_again(loop, &entry->timeout_watcher); return; } if (entry->bufferat < entry->packetsize) { debug_log(DEBUG_DEBUG, "event_tcp_read_cb(): %s buffer not full yet, so waiting for next batch\n", internal ? "internal" : "external"); ev_timer_again(loop, &entry->timeout_watcher); return; } debug_log(DEBUG_INFO, "event_tcp_read_cb(): received entire packet from %s, packetsize = %zu\n", internal ? 
"server" : "client", entry->packetsize); // Done with reading, so stop the watchers involved: if (internal) { // Stop listening on the internal connection: ev_io_stop(loop, &entry->read_watcher); ev_timer_stop(loop, &entry->timeout_watcher); // We received the answer from the authoritative name server, // so close this connection: ip_tcp_close(entry->intsock); entry->intsock = -1; // Let's see what kind of packet we are dealing with: if (!dns_analyze_reply_query(general_entry)) { debug_log(DEBUG_WARN, "event_tcp_read_cb(): analyzing of DNS response failed\n"); goto wrong; } // Now forward the packet towards the authoritative name server: if (!dns_reply_query_tcp(general_entry)) { debug_log(DEBUG_WARN, "event_tcp_read_cb(): failed to reply the response towards the client\n"); goto wrong; } // We start to send data again, back to the client: entry->state = EVENT_TCP_EXT_WRITING_INIT; entry->bufferat = 0; ev_timer_set(&entry->timeout_watcher, 0., global_ip_tcp_external_timeout); ev_io_set(&entry->write_watcher, entry->extsock, EV_WRITE); ev_io_set(&entry->read_watcher, entry->extsock, EV_READ); ev_io_start(loop, &entry->write_watcher); ev_timer_again(loop, &entry->timeout_watcher); } else { // Reading from client done, stop the watchers + timeout: ev_io_stop(loop, &entry->read_watcher); ev_timer_stop(loop, &entry->timeout_watcher); // Let's see what kind of packet we are dealing with: if (!dns_analyze_query(general_entry)) { debug_log(DEBUG_WARN, "event_tcp_read_cb(): analyzing of DNS query failed\n"); goto wrong; } // Now forward the packet towards the authoritative name server: if (!dns_forward_query_tcp(general_entry)) { debug_log(DEBUG_WARN, "event_tcp_read_cb(): failed to forward query towards authoritative name server\n"); goto wrong; } // Now get ready for sending a TCP query towards the authoritative name server: entry->state = EVENT_TCP_INT_WRITING_INIT; entry->bufferat = 0; ev_timer_set(&entry->timeout_watcher, 0., global_ip_internal_timeout); ev_io_set(&entry->write_watcher, entry->intsock, EV_WRITE); ev_io_set(&entry->read_watcher, entry->intsock, EV_READ); ev_io_start(loop, &entry->write_watcher); ev_timer_again(loop, &entry->timeout_watcher); } return; wrong: event_cleanup_entry(loop, general_entry); return; } void event_tcp_accept_cb(struct ev_loop *loop, ev_io *w, int revent) { event_entry_t *general_entry = NULL; struct event_tcp_entry *entry = NULL; socklen_t addresslen = sizeof(anysin_t); // We will get notified when there is an accept available, so // set up an entry: general_entry = (event_entry_t *) malloc(sizeof(event_entry_t)); if (!general_entry) goto wrong; memset(general_entry, 0, sizeof(event_entry_t)); entry = &general_entry->tcp; // Now accept the TCP connection: errno = 0; entry->extsock = accept(w->fd, (struct sockaddr *) &entry->address.sa, &addresslen); if (entry->extsock == -1) { if (errno == EAGAIN) return; debug_log(DEBUG_WARN, "event_tcp_accept_cb(): unable to accept TCP connection\n"); goto wrong; } else if (errno) { debug_log(DEBUG_WARN, "event_tcp_accept_cb(): unable to accept TCP connection (%s)\n", strerror(errno)); goto wrong; } if (++event_tcp_number_connections >= global_ip_tcp_max_number_connections) { debug_log(DEBUG_INFO, "event_tcp_accept_cb(): reached maximum number of TCP connections, temporarily waiting\n"); event_tcp_startstop_watchers(loop, 0); } // We have a new connection, set up the buffer: entry->buffer = (uint8_t *) malloc(global_ip_tcp_buffersize); if (!entry->buffer) goto wrong; memset(entry->buffer, 0, global_ip_tcp_buffersize); 
entry->bufferlen = global_ip_tcp_buffersize; entry->protocol = IP_PROTOCOL_TCP; entry->state = EVENT_TCP_EXT_READING_INIT; entry->intsock = -1; // Set the general entry pointer in the watcher's data pointer: entry->read_watcher.data = general_entry; entry->write_watcher.data = general_entry; entry->timeout_watcher.data = general_entry; // Initialize the timers (for timeouts), and the i/o watchers for the external socket: ev_timer_init(&entry->timeout_watcher, event_tcp_timeout_cb, 0., global_ip_tcp_external_timeout); ev_io_init(&entry->write_watcher, event_tcp_write_cb, entry->extsock, EV_WRITE); ev_io_init(&entry->read_watcher, event_tcp_read_cb, entry->extsock, EV_READ); if (debug_level >= DEBUG_INFO) { char s[52]; ip_address_total_string(&entry->address, s, sizeof(s)); debug_log(DEBUG_INFO, "event_tcp_accept_cb(): received TCP DNS request from %s\n", s); } // Now start the read watcher and an associated timeout: ev_io_start(loop, &entry->read_watcher); ev_timer_again(loop, &entry->timeout_watcher); return; wrong: event_cleanup_entry(loop, general_entry); return; } curvedns-curvedns-0.87/event_udp.c000066400000000000000000000160441150631715100172720ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. 
* */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #include "event.h" #include "dns.h" void event_cleanup_udp_entry(struct ev_loop *loop, struct event_udp_entry *entry) { if (entry) { if (ev_is_active(&entry->read_int_watcher)) ev_io_stop(loop, &entry->read_int_watcher); if (entry->read_int_watcher.fd >= 0) close(entry->read_int_watcher.fd); if (ev_is_active(&entry->timeout_int_watcher)) ev_timer_stop(loop, &entry->timeout_int_watcher); free(entry); } } void event_udp_timeout_cb(struct ev_loop *loop, ev_timer *w, int revent) { event_entry_t *general_entry = (event_entry_t *) w->data; struct event_udp_entry *entry = (struct event_udp_entry *) &general_entry->udp; if (!(revent & EV_TIMEOUT)) goto wrong; if (entry->state != EVENT_UDP_INT_WRITING) goto wrong; // Check if we reached maximum number of retries: if (entry->retries >= global_ip_udp_retries) { debug_log(DEBUG_INFO, "event_udp_timeout_cb(): reached maximum number of UDP retries\n"); goto wrong; } // If not, close down the socket, i/o watcher and timeout, and try to send it again: ev_io_stop(loop, &entry->read_int_watcher); ev_timer_stop(loop, &entry->timeout_int_watcher); if (entry->read_int_watcher.fd >= 0) { close(entry->read_int_watcher.fd); entry->read_int_watcher.fd = -1; } if (!dns_forward_query_udp(general_entry)) { debug_log(DEBUG_WARN, "event_udp_timeout_cb(): unable to resend query to authoritative server\n"); goto wrong; } return; /* nxdomain: ev_io_stop(loop, &entry->read_int_watcher); ev_timer_stop(loop, &entry->timeout_int_watcher); if (!dns_reply_nxdomain_query_udp(general_entry)) { debug_log(DEBUG_WARN, "event_udp_timeout_cb(): unable to send NXDOMAIN response\n"); } */ wrong: event_cleanup_entry(loop, general_entry); return; } void event_udp_int_cb(struct ev_loop *loop, ev_io *w, int revent) { event_entry_t *general_entry = (event_entry_t *) w->data; struct event_udp_entry *entry = (struct event_udp_entry *) &general_entry->udp; int n; anysin_t address; socklen_t addresslen = sizeof(anysin_t); if (!(revent & EV_READ)) goto wrong; if (general_entry->general.protocol != IP_PROTOCOL_UDP) goto wrong; if (entry->state != EVENT_UDP_INT_WRITING) goto wrong; // We will only receive one UDP packet, and stop the (timeout) watcher: ev_timer_stop(loop, &entry->timeout_int_watcher); ev_io_stop(loop, &entry->read_int_watcher); entry->state = EVENT_UDP_INT_READING; n = recvfrom(w->fd, entry->buffer, global_ip_udp_buffersize, MSG_DONTWAIT, (struct sockaddr *) &address.sa, &addresslen); if (n == -1) { // The ready for reading event will again be triggered... return; } entry->packetsize = n; // We can also close the socket towards the authoritative name server, as we are done: if (entry->read_int_watcher.fd >= 0) { close(entry->read_int_watcher.fd); entry->read_int_watcher.fd = -1; } // Check if the response really came from our target server: if (ip_compare_address(&address, &global_target_address) != 0) { char s[52]; ip_address_total_string(&address, s, sizeof(s)); debug_log(DEBUG_WARN, "event_udp_int_cb(): response is not coming from target address, but from %s\n", s); goto wrong; } // And the same goes for the port: if (ip_compare_port(&address, &global_target_address) != 0) { debug_log(DEBUG_WARN, "event_udp_int_cb(): response is not coming from target address port\n"); goto wrong; } // Now analyze the query (i.e. 
is it the right one?): if (!dns_analyze_reply_query(general_entry)) { debug_log(DEBUG_WARN, "event_udp_int_cb(): failed to analyze the reply\n"); goto wrong; } // Send the reply through UDP: if (!dns_reply_query_udp(general_entry)) { debug_log(DEBUG_WARN, "event_udp_int_cb(): failed to send the reply\n"); goto wrong; } wrong: // And since we're now done, clear the memory: event_cleanup_entry(loop, general_entry); return; } void event_udp_ext_cb(struct ev_loop *loop, ev_io *w, int revent) { struct ip_socket_t *sock = (struct ip_socket_t *) w->data; event_entry_t *general_entry = NULL; struct event_udp_entry *entry = NULL; ssize_t n; socklen_t addresslen = sizeof(anysin_t); if (!(revent & EV_READ)) return; general_entry = (event_entry_t *) malloc(sizeof(event_entry_t)); if (!general_entry) goto wrong; memset(general_entry, 0, sizeof(event_entry_t)); entry = &general_entry->udp; entry->protocol = IP_PROTOCOL_UDP; entry->buffer = (uint8_t *) malloc(global_ip_udp_buffersize); if (!entry->buffer) goto wrong; entry->bufferlen = global_ip_udp_buffersize; memset(entry->buffer, 0, entry->bufferlen); entry->retries = 0; entry->sock = sock; entry->state = EVENT_UDP_EXT_READING; n = recvfrom(w->fd, entry->buffer, entry->bufferlen, MSG_DONTWAIT, (struct sockaddr *) &entry->address.sa, &addresslen); if (n == -1) { // YYY: maybe an overlap goto wrong; } entry->packetsize = n; if (debug_level >= DEBUG_INFO) { char s[52]; ip_address_total_string(&entry->address, s, sizeof(s)); debug_log(DEBUG_INFO, "event_udp_ext_cb(): received UDP query from %s\n", s); } // Start analyzing the query (is it malformed, or not?): if (!dns_analyze_query(general_entry)) { debug_log(DEBUG_WARN, "event_udp_ext_cb(): analyzing of query failed\n"); goto wrong; } // Now forward the query (through UDP) towards the authoritative name server: if (!dns_forward_query_udp(general_entry)) { debug_log(DEBUG_WARN, "event_udp_ext_cb(): failed to forward query to authoritative name server\n"); goto wrong; } return; wrong: event_cleanup_entry(loop, general_entry); return; } curvedns-curvedns-0.87/ip.c000066400000000000000000000256131150631715100157130ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #include "ip.h" #include "misc.h" #include "curvedns.h" /* Global definitions, that are IP (or: network) related */ struct ip_socket_t *global_ip_sockets = NULL; int global_ip_sockets_count = 0; ev_tstamp global_ip_internal_timeout = 1.2; ev_tstamp global_ip_tcp_external_timeout = 60.0; int global_ip_tcp_max_number_connections = 25; size_t global_ip_tcp_buffersize = 8192; size_t global_ip_udp_buffersize = 4096; uint8_t global_ip_udp_retries = 2; anysin_t global_target_address; socklen_t global_target_address_len; anysin_t global_source_address; static int ip_socket(anysin_t *address, ip_protocol_t protocol) { return socket(address->sa.sa_family, (protocol == IP_PROTOCOL_UDP) ? SOCK_DGRAM : SOCK_STREAM, 0); } static int ip_tcp_listen(int sock) { int n; n = listen(sock, 20); if (n == -1) { debug_log(DEBUG_ERROR, "ip_tcp_listen(): unable to listen on socket (%s)\n", strerror(errno)); return 0; } return 1; } int ip_udp_open(int *sock, anysin_t *address) { *sock = ip_socket(address, IP_PROTOCOL_UDP); if (*sock < 0) goto wrong; if (!ip_nonblock(*sock)) debug_log(DEBUG_WARN, "ip_udp_open(): unable to set socket non-blocking (%s)\n", strerror(errno)); return 1; wrong: if (*sock >= 0) close(*sock); return 0; } int ip_nonblock(int sock) { if (fcntl(sock, F_SETFL, (fcntl(sock, F_GETFL, 0)) | O_NONBLOCK) == -1) return 0; return 1; } int ip_reuse(int sock) { int n = 1; if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &n, sizeof(n)) == -1) return 0; return 1; } int ip_tcp_open(int *sock, anysin_t *address) { *sock = ip_socket(address, IP_PROTOCOL_TCP); if (*sock < 0) goto wrong; if (!ip_nonblock(*sock)) debug_log(DEBUG_WARN, "ip_tcp_open(): unable to set socket non-blocking (%s)\n", strerror(errno)); return 1; wrong: if (*sock >= 0) { close(*sock); *sock = -1; } return 0; } int ip_tcp_close(int sock) { if (sock < 0) goto wrong; shutdown(sock, SHUT_RDWR); close(sock); return 1; wrong: return 0; } int ip_init(anysin_t *addresses, int addresses_count) { int i; global_ip_sockets = (struct ip_socket_t *) calloc(addresses_count * 2, sizeof(struct ip_socket_t)); if (!global_ip_sockets) goto wrong; global_ip_sockets_count = addresses_count * 2; for (i = 0; i < global_ip_sockets_count; i++) global_ip_sockets[i].fd = -1; for (i = 0; i < addresses_count; i++) { int sid = i * 2; // Do UDP bindings: global_ip_sockets[sid].address = &addresses[i]; global_ip_sockets[sid].protocol = IP_PROTOCOL_UDP; if (!ip_udp_open(&global_ip_sockets[sid].fd, &addresses[i])) { debug_log(DEBUG_FATAL, "ip_init(): unable to open UDP socket (%s)\n", strerror(errno)); goto wrong; } if (!ip_reuse(global_ip_sockets[sid].fd)) debug_log(DEBUG_WARN, "ip_init(): unable to set UDP socket to reuse address (%s)\n", strerror(errno)); if (!ip_bind(global_ip_sockets[sid].fd, &addresses[i])) { debug_log(DEBUG_FATAL, "ip_init(): unable to bind UDP socket (%s)\n", strerror(errno)); goto wrong; } // Do TCP bindings: global_ip_sockets[sid+1].address = &addresses[i]; global_ip_sockets[sid+1].protocol = IP_PROTOCOL_TCP; if (!ip_tcp_open(&global_ip_sockets[sid+1].fd, &addresses[i])) { debug_log(DEBUG_FATAL, "ip_init(): unable to open TCP socket (%s)\n", strerror(errno)); goto wrong; } if (!ip_reuse(global_ip_sockets[sid+1].fd)) debug_log(DEBUG_WARN, "ip_init(): unable to set TCP socket to reuse 
address (%s)\n", strerror(errno)); if (!ip_bind(global_ip_sockets[sid+1].fd, &addresses[i])) { debug_log(DEBUG_FATAL, "ip_init(): unable to bind TCP socket (%s)\n", strerror(errno)); goto wrong; } if (!ip_tcp_listen(global_ip_sockets[sid+1].fd)) { debug_log(DEBUG_FATAL, "ip_init(): unable to listen on TCP socket (%s)\n", strerror(errno)); } } return 1; wrong: ip_close(); return 0; } void ip_close() { int i; if (global_ip_sockets) { for (i = 0; i < global_ip_sockets_count; i++) if (global_ip_sockets[i].fd >= 0) close(global_ip_sockets[i].fd); free(global_ip_sockets); global_ip_sockets = NULL; global_ip_sockets_count = 0; } } // Watch it, only to be used for sending queries to authoritative name server! int ip_bind_random(int sock) { unsigned int i; anysin_t addr; socklen_t addrlen = sizeof(addr); memset(&addr, 0, sizeof(addr)); // See to what kind of socket we have to bind: if (global_target_address.sa.sa_family == AF_INET6) { addr.sa.sa_family = AF_INET6; for (i = 0; i < 10; i++) { if (global_source_address.sa.sa_family != AF_UNSPEC) { memcpy(&(addr.sin6.sin6_addr), &(global_source_address.sin6.sin6_addr), sizeof(addr.sin6.sin6_addr)); } addr.sin6.sin6_port = 1025 + misc_crypto_random(64510); if (bind(sock, (struct sockaddr *) &addr, addrlen) == 0) return 1; } } else { addr.sa.sa_family = AF_INET; for (i = 0; i < 10; i++) { if (global_source_address.sa.sa_family != AF_UNSPEC) { memcpy(&(addr.sin.sin_addr), &(global_source_address.sin.sin_addr), sizeof(addr.sin.sin_addr)); } addr.sin.sin_port = 1025 + misc_crypto_random(64510); if (bind(sock, (struct sockaddr *) &addr, addrlen) == 0) return 1; } } return 0; } int ip_connect(int sock, anysin_t *address) { // The connect is non-blocking, so we continue only if it returns // okay, or the error code is EINPROGRESS (which means the kernel // tries to accomplish it in the background: int result = connect(sock, (struct sockaddr *) address, (address->sa.sa_family == AF_INET) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); if (result == -1) if (errno != EINPROGRESS) return 0; return 1; } int ip_bind(int sock, anysin_t *address) { return (bind(sock, (struct sockaddr *) address, (address->sa.sa_family == AF_INET) ? 
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)) == 0); } // outcount The number of anysin_t objects that are returned // inip The IP string (like '127.0.0.1,10.0.0.1') // inport The port string (like '53' or '1053') // return Array of anysin_t objects, one for each IP in the IP string anysin_t *ip_multiple_parse(int *outcount, const char *inip, const char *inport) { anysin_t *result = NULL; char *prev, *p; int i, len, found, wasnull; *outcount = 1; len = strlen(inip); for (p = (char *)inip; *p; p++) { if (*p == ',') (*outcount)++; } result = (anysin_t *) calloc(*outcount, sizeof(anysin_t)); if (!result) goto wrong; p = prev = (char *)inip; found = wasnull = 0; for (i = 0; (i <= len) && (found < *outcount); i++, p++) { wasnull = 0; if (*p == '\0') wasnull = 1; if (wasnull || (*p == ',')) { *p = '\0'; if (!ip_parse(&result[found], prev, inport)) goto wrong; if (!wasnull) // make sure prev only points to okay memory prev = p + 1; found++; } } return result; wrong: debug_log(DEBUG_FATAL, "ip_multiple_parse(): failed to parse IP addresses\n"); if (result) free(result); return NULL; } // out anysin_t that represents the input // inip IP address (either IPv6 or IPv4) // inport port string // return zero when something went wrong int ip_parse(anysin_t *out, const char *inip, const char *inport) { struct addrinfo hints, *result = NULL; int ret; memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_UNSPEC; hints.ai_flags = AI_PASSIVE | AI_NUMERICHOST; //hints.ai_socktype = SOCK_DGRAM; ret = getaddrinfo(inip, inport, &hints, &result); if (ret != 0) { if (result) freeaddrinfo(result); return 0; } memcpy(out, result->ai_addr, result->ai_addrlen); freeaddrinfo(result); return 1; } int ip_compare_address(anysin_t *a, anysin_t *b) { if (a->sa.sa_family != b->sa.sa_family) return (a->sa.sa_family - b->sa.sa_family); if (a->sa.sa_family == AF_INET) { return (a->sin.sin_addr.s_addr - b->sin.sin_addr.s_addr); } else if (a->sa.sa_family == AF_INET6) { return memcmp( &a->sin6.sin6_addr, &b->sin6.sin6_addr, sizeof(b->sin6.sin6_addr)); } return -1; } int ip_compare_port(anysin_t *a, anysin_t *b) { if (a->sa.sa_family != b->sa.sa_family) return (a->sa.sa_family - b->sa.sa_family); if (a->sa.sa_family == AF_INET) { return (a->sin.sin_port - b->sin.sin_port); } else if (a->sa.sa_family == AF_INET6) { return (a->sin6.sin6_port - b->sin6.sin6_port); } return -1; } int ip_address_string(const anysin_t *address, char *buf, socklen_t buflen) { memset(buf, 0, buflen); if (address->sa.sa_family == AF_INET) { if (buflen < INET_ADDRSTRLEN) return 0; if (inet_ntop(AF_INET, &address->sin.sin_addr, buf, buflen) == buf) return 1; } else if (address->sa.sa_family == AF_INET6) { if (buflen < INET6_ADDRSTRLEN) return 0; if (inet_ntop(AF_INET6, &address->sin6.sin6_addr, buf, buflen) == buf) return 1; } else { debug_log(DEBUG_WARN, "ip_address_string(): unknown family\n"); } return 0; } int ip_port_integer(const anysin_t *address, uint16_t *port) { if (address->sa.sa_family == AF_INET) { *port = ntohs(address->sin.sin_port); return 1; } else if (address->sa.sa_family == AF_INET6) { *port = ntohs(address->sin6.sin6_port); return 1; } return 0; } int ip_address_total_string(const anysin_t *address, char *buf, socklen_t buflen) { uint16_t port; char address_string[INET6_ADDRSTRLEN]; memset(buf, 0, buflen); if (!ip_port_integer(address, &port)) return 0; if (!ip_address_string(address, address_string, sizeof(address_string))) return 0; if (snprintf(buf, buflen, "%s:%d", address_string, port) <= 0) return 0; return 1; } 
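The following short example is not part of the original ip.c; it is an illustrative sketch of how the parsing helpers above fit together, under the assumptions spelled out in its header comment (the file name, addresses, port and build line are invented for the example).

/*
 * example-ip-parse.c -- illustrative sketch only, NOT shipped with CurveDNS.
 * It shows the intended combination of ip_multiple_parse() and
 * ip_address_total_string(). Assumptions: it is compiled inside this source
 * tree (so "ip.h" and the libev headers are found) and linked against
 * ip.o, misc.o and debug.o, e.g.:
 *   cc -I. -c example-ip-parse.c
 *   cc example-ip-parse.o ip.o misc.o debug.o -o example-ip-parse
 */
#include <stdio.h>
#include <stdlib.h>
#include <netinet/in.h>
#include "ip.h"

int main(void) {
	/* ip_multiple_parse() writes '\0' separators into its input,
	 * so the address list must live in writable memory. */
	char ips[] = "127.0.0.1,::1";
	char buf[INET6_ADDRSTRLEN + 16];	/* room for "address:port" */
	anysin_t *addrs;
	int i, count = 0;

	addrs = ip_multiple_parse(&count, ips, "53");
	if (!addrs)
		return 1;
	for (i = 0; i < count; i++) {
		if (ip_address_total_string(&addrs[i], buf, sizeof(buf)))
			printf("listen address %d: %s\n", i, buf);
	}
	free(addrs);
	return 0;
}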
curvedns-curvedns-0.87/ip.h000066400000000000000000000073071150631715100157200ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #ifndef IP_H_ #define IP_H_ #include #include #include #include #include #include #include /* uintx_t */ #include /* socklen_t */ #include /* in_addr_t, in_port_t, sockaddr_storage, htons(), ntohs() */ #include /* inet_pton(), inet_ntop() */ #include /* fcntl() */ #include /* getaddrinfo() */ #include /* libev */ typedef union { struct sockaddr sa; struct sockaddr_in sin; struct sockaddr_in6 sin6; } anysin_t; typedef enum { IP_PROTOCOL_UDP, IP_PROTOCOL_TCP, } ip_protocol_t; struct ip_socket_t { anysin_t *address; /* socket which is bind to */ int fd; /* fd of socket */ ip_protocol_t protocol; /* 0 = UDP, 1 = TCP */ }; extern anysin_t global_source_address; extern anysin_t global_target_address; extern socklen_t global_target_address_len; extern struct ip_socket_t *global_ip_sockets; extern int global_ip_sockets_count; extern ev_tstamp global_ip_internal_timeout; extern ev_tstamp global_ip_tcp_external_timeout; extern int global_ip_tcp_max_number_connections; extern size_t global_ip_tcp_buffersize; extern size_t global_ip_udp_buffersize; extern uint8_t global_ip_udp_retries; /* IP main functions */ extern int ip_init(anysin_t *, int); extern void ip_close(); extern int ip_bind_random(int); extern int ip_bind(int, anysin_t *); extern int ip_connect(int, anysin_t *); extern int ip_nonblock(int); extern int ip_reuse(int); extern int ip_udp_open(int *, anysin_t *); extern int ip_tcp_open(int *, anysin_t *); extern int ip_tcp_close(int); /* IP string handling */ extern int ip_parse(anysin_t *, const char *, const char *); extern anysin_t *ip_multiple_parse(int *, const char *, const char *); extern int ip_address_string(const anysin_t *, char *, socklen_t); extern int ip_port_integer(const anysin_t *, uint16_t *); extern int ip_address_total_string(const anysin_t *, char *, socklen_t); /* IP comparison */ extern int ip_compare_address(anysin_t 
*, anysin_t *); extern int ip_compare_port(anysin_t *, anysin_t *); #endif /* IP_H_ */ curvedns-curvedns-0.87/misc.c000066400000000000000000000171571150631715100162420ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. * */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #include "misc.h" #include "ip.h" // An open descriptor to /dev/urandom int global_urandom_fd = -1; static char *misc_getenv(const char *env, int mandatory) { char *ptr; ptr = getenv(env); if (!ptr) { if (mandatory) { debug_log(DEBUG_FATAL, "the environment variable $%s must be set!\n", env); } return NULL; } return ptr; } // result = -1 - IP found in env, but not correct // 0 - no IP found in env // 1 - IP found in env and correct int misc_getenv_ip(const char *env, int mandatory, anysin_t *result) { char *ptr = misc_getenv(env, mandatory); if (ptr) { if (!ip_parse(result, ptr, "53")) return -1; return 1; } return 0; } int misc_getenv_int(const char *env, int mandatory, int *result) { char *ptr = misc_getenv(env, mandatory); if (ptr) { *result = atoi(ptr); return 1; } return 0; } int misc_getenv_double(const char *env, int mandatory, double *result) { char *ptr = misc_getenv(env, mandatory); if (ptr) { *result = atof(ptr); return 1; } return 0; } int misc_getenv_key(const char *env, int mandatory, uint8_t *result) { char *ptr; if (!(ptr = misc_getenv(env, mandatory))) { return 0; } if (strlen(ptr) != 64) { debug_log(DEBUG_FATAL, "key in $%s must be 64 bytes long\n", env); return 0; } if (!misc_hex_decode(ptr, result)) { debug_log(DEBUG_FATAL, "key in $%s appears to be invalid\n", env); return 0; } return 1; } int misc_char_hex(char in, uint8_t *out) { if ((in >= '0') && (in <= '9')) { *out = in - '0'; return 1; } else if ((in >= 'a') && (in <= 'f')) { *out = 10 + (in - 'a'); return 1; } else if ((in >= 'A') && (in <= 'F')) { *out = 10 + (in - 'A'); return 1; } else { return 0; } } int misc_hex_char(uint8_t in, char *out) { if (in < 10) *out = in + '0'; else if (in < 16) *out = (in - 10) + 'a'; else return 0; return 1; } int misc_hex_decode(const char 
*src, uint8_t *dst) { uint8_t v1, v2; while (*src) { if (!misc_char_hex(*src++, &v1)) return 0; if (!misc_char_hex(*src++, &v2)) return 0; *dst++ = (v1 << 4) | v2; } return 1; } int misc_hex_encode(const uint8_t *src, int srclen, char *dst, int dstlen) { int i = 0; memset(dst, 0, dstlen); if ((srclen * 2) < dstlen) return 0; while (i < srclen) { if (!misc_hex_char(src[i] >> 4, dst)) return 0; dst++; if (!misc_hex_char(src[i] & 0xf, dst)) return 0; dst++; i++; } return 1; } /* All needed for cryptography random functions, taken from djbdns */ static uint32_t seed[32]; static uint32_t in[12]; static uint32_t out[8]; static int outleft = 0; #define ROTATE(x,b) (((x) << (b)) | ((x) >> (32 - (b)))) #define MUSH(i,b) x = t[i] += (((x ^ seed[i]) + sum) ^ ROTATE(x,b)); static void surf(void) { uint32_t t[12]; uint32_t x; uint32_t sum = 0; int r; int i; int loop; for (i = 0;i < 12;++i) t[i] = in[i] ^ seed[12 + i]; for (i = 0;i < 8;++i) out[i] = seed[24 + i]; x = t[11]; for (loop = 0;loop < 2;++loop) { for (r = 0;r < 16;++r) { sum += 0x9e3779b9; MUSH(0,5) MUSH(1,7) MUSH(2,9) MUSH(3,13) MUSH(4,5) MUSH(5,7) MUSH(6,9) MUSH(7,13) MUSH(8,5) MUSH(9,7) MUSH(10,9) MUSH(11,13) } for (i = 0;i < 8;++i) out[i] ^= t[i + 4]; } } void misc_randombytes(uint8_t *x, unsigned long long xlen) { int i; while (xlen > 0) { if (xlen < 1048576) i = xlen; else i = 1048576; i = read(global_urandom_fd, x, i); if (i < 1) { sleep(1); continue; } x += i; xlen -= i; } } int misc_crypto_random_init() { global_urandom_fd = open("/dev/urandom", O_RDONLY); if (global_urandom_fd < 0) { perror("opening /dev/urandom failed"); return 0; } misc_randombytes((uint8_t *) in, sizeof(in)); return 1; } unsigned int misc_crypto_random(unsigned int n) { if (!n) return 0; if (!outleft) { if (!++in[0]) if (!++in[1]) if (!++in[2]) ++in[3]; surf(); outleft = 8; } return out[--outleft] % n; } // Make sure sizeof(nonce) >= 12 void misc_crypto_nonce(uint8_t *nonce, void *time, int len) { // We would like the first 64 bits to be time based. // The last 32 bits can be random. // XXX: but dirty solution, nicer way? if (len < 8) { memcpy(nonce, time, len); } else { memcpy(nonce, time, 8); len = 8; } for ( ; len < 12; len++) nonce[len] = misc_crypto_random(256); } static const uint8_t kValues[] = { 99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99, 99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,0,1, 2,3,4,5,6,7,8,9,99,99,99,99,99,99,99,99,10,11,12,99,13,14,15,99,16,17,18, 19,20,99,21,22,23,24,25,26,27,28,29,30,31,99,99,99,99,99,99,99,10,11,12,99, 13,14,15,99,16,17,18,19,20,99,21,22,23,24,25,26,27,28,29,30,31,99,99,99,99,99 }; // To Matthew Dempsky // XXX: maybe faster? int misc_base32_decode(uint8_t *output, unsigned int *ooutlen, const uint8_t *in, unsigned int inlen, int mode) { unsigned int i = 0, j = 0; unsigned int v = 0, bits = 0; const unsigned outlen = *ooutlen; while (j < inlen) { if (in[j] & 0x80) goto PROTO; const uint8_t b = kValues[in[j++]]; if (b > 31) goto PROTO; v |= ((unsigned) b) << bits; bits += 5; if (bits >= 8) { if (i >= outlen) goto TOOBIG; output[i++] = v; bits -= 8; v >>= 8; } } if (mode) { if (bits) { if (i >= outlen) goto TOOBIG; output[i++] = v; } } else if (bits >= 5 || v) goto PROTO; *ooutlen = i; return 1; TOOBIG: errno = E2BIG; return 0; PROTO: errno = EPROTO; return 0; } // To Matthew Dempsky // XXX: maybe faster? 
int misc_base32_encode(uint8_t *output, unsigned int *ooutlen, const uint8_t *in, unsigned int inlen) { unsigned int i = 0, j = 0; unsigned int v = 0, bits = 0; const unsigned outlen = *ooutlen; static const char kChars[] = "0123456789bcdfghjklmnpqrstuvwxyz"; while (j < inlen) { v |= ((unsigned) in[j++]) << bits; bits += 8; while (bits >= 5) { if (i >= outlen) goto TOOBIG; output[i++] = kChars[v & 31]; bits -= 5; v >>= 5; } } if (bits) { if (i >= outlen) goto TOOBIG; output[i++] = kChars[v & 31]; bits -= 5; v >>= 5; } *ooutlen = i; return 1; TOOBIG: errno = E2BIG; return 0; } curvedns-curvedns-0.87/misc.h000066400000000000000000000052071150631715100162400ustar00rootroot00000000000000/* * Copyright 2010 CurveDNS Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY CurveDNS Project ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CurveDNS Project OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of CurveDNS Project. 
* */ /* * $Id$ * $Author$ * $Date$ * $Revision$ */ #ifndef MISC_H_ #define MISC_H_ #include #include #include #include #include #include #include #include "debug.h" #include "ip.h" extern int misc_getenv_ip(const char *, int, anysin_t *); extern int misc_getenv_int(const char *, int, int *); extern int misc_getenv_double(const char *, int, double *); extern int misc_getenv_key(const char *, int, uint8_t *); extern int misc_char_hex(char, uint8_t *); extern int misc_hex_char(uint8_t, char *); extern int misc_hex_decode(const char *, uint8_t *); extern int misc_hex_encode(const uint8_t *, int, char *, int); extern int misc_base32_decode(uint8_t *, unsigned int *, const uint8_t *, unsigned int, int); extern int misc_base32_encode(uint8_t *, unsigned int *, const uint8_t *, unsigned int); extern void misc_randombytes(uint8_t *, unsigned long long); extern int misc_crypto_random_init(); extern unsigned int misc_crypto_random(unsigned int); extern void misc_crypto_nonce(uint8_t *, void *, int); #endif /* MISC_H_ */ curvedns-curvedns-0.87/nacl/000077500000000000000000000000001150631715100160455ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/MACROS000066400000000000000000000021001150631715100167450ustar00rootroot00000000000000crypto_verify crypto_verify_BYTES crypto_core crypto_core_OUTPUTBYTES crypto_core_INPUTBYTES crypto_core_KEYBYTES crypto_core_CONSTBYTES crypto_hashblocks crypto_hashblocks_STATEBYTES crypto_hashblocks_BLOCKBYTES crypto_hash crypto_hash_BYTES crypto_stream crypto_stream_xor crypto_stream_beforenm crypto_stream_afternm crypto_stream_xor_afternm crypto_stream_KEYBYTES crypto_stream_NONCEBYTES crypto_stream_BEFORENMBYTES crypto_onetimeauth crypto_onetimeauth_verify crypto_onetimeauth_BYTES crypto_onetimeauth_KEYBYTES crypto_auth crypto_auth_verify crypto_auth_BYTES crypto_auth_KEYBYTES crypto_secretbox crypto_secretbox_open crypto_secretbox_KEYBYTES crypto_secretbox_NONCEBYTES crypto_secretbox_ZEROBYTES crypto_secretbox_BOXZEROBYTES crypto_scalarmult crypto_scalarmult_base crypto_scalarmult_BYTES crypto_scalarmult_SCALARBYTES crypto_box crypto_box_open crypto_box_keypair crypto_box_beforenm crypto_box_afternm crypto_box_open_afternm crypto_box_PUBLICKEYBYTES crypto_box_SECRETKEYBYTES crypto_box_BEFORENMBYTES crypto_box_NONCEBYTES crypto_box_ZEROBYTES crypto_box_BOXZEROBYTES curvedns-curvedns-0.87/nacl/OPERATIONS000066400000000000000000000002231150631715100174500ustar00rootroot00000000000000crypto_verify crypto_core crypto_hashblocks crypto_hash crypto_stream crypto_onetimeauth crypto_auth crypto_secretbox crypto_scalarmult crypto_box curvedns-curvedns-0.87/nacl/PROTOTYPES.c000066400000000000000000000047411150631715100177470ustar00rootroot00000000000000extern int crypto_verify(const unsigned char *,const unsigned char *); extern int crypto_core(unsigned char *,const unsigned char *,const unsigned char *,const unsigned char *); extern int crypto_hashblocks(unsigned char *,const unsigned char *,unsigned long long); extern int crypto_hash(unsigned char *,const unsigned char *,unsigned long long); extern int crypto_stream(unsigned char *,unsigned long long,const unsigned char *,const unsigned char *); extern int crypto_stream_xor(unsigned char *,const unsigned char *,unsigned long long,const unsigned char *,const unsigned char *); extern int crypto_stream_beforenm(unsigned char *,const unsigned char *); extern int crypto_stream_afternm(unsigned char *,unsigned long long,const unsigned char *,const unsigned char *); extern int crypto_stream_xor_afternm(unsigned char *,const 
unsigned char *,unsigned long long,const unsigned char *,const unsigned char *); extern int crypto_onetimeauth(unsigned char *,const unsigned char *,unsigned long long,const unsigned char *); extern int crypto_onetimeauth_verify(const unsigned char *,const unsigned char *,unsigned long long,const unsigned char *); extern int crypto_auth(unsigned char *,const unsigned char *,unsigned long long,const unsigned char *); extern int crypto_auth_verify(const unsigned char *,const unsigned char *,unsigned long long,const unsigned char *); extern int crypto_secretbox(unsigned char *,const unsigned char *,unsigned long long,const unsigned char *,const unsigned char *); extern int crypto_secretbox_open(unsigned char *,const unsigned char *,unsigned long long,const unsigned char *,const unsigned char *); extern int crypto_scalarmult(unsigned char *,const unsigned char *,const unsigned char *); extern int crypto_scalarmult_base(unsigned char *,const unsigned char *); extern int crypto_box(unsigned char *,const unsigned char *,unsigned long long,const unsigned char *,const unsigned char *,const unsigned char *); extern int crypto_box_open(unsigned char *,const unsigned char *,unsigned long long,const unsigned char *,const unsigned char *,const unsigned char *); extern int crypto_box_keypair(unsigned char *,unsigned char *); extern int crypto_box_beforenm(unsigned char *,const unsigned char *,const unsigned char *); extern int crypto_box_afternm(unsigned char *,const unsigned char *,unsigned long long,const unsigned char *,const unsigned char *); extern int crypto_box_open_afternm(unsigned char *,const unsigned char *,unsigned long long,const unsigned char *,const unsigned char *); curvedns-curvedns-0.87/nacl/PROTOTYPES.cpp000066400000000000000000000022671150631715100203100ustar00rootroot00000000000000extern std::string crypto_auth(const std::string &,const std::string &); extern void crypto_auth_verify(const std::string &,const std::string &,const std::string &); extern std::string crypto_box(const std::string &,const std::string &,const std::string &,const std::string &); extern std::string crypto_box_open(const std::string &,const std::string &,const std::string &,const std::string &); extern std::string crypto_box_keypair(std::string *); extern std::string crypto_hash(const std::string &); extern std::string crypto_onetimeauth(const std::string &,const std::string &); extern void crypto_onetimeauth_verify(const std::string &,const std::string &,const std::string &); extern std::string crypto_scalarmult(const std::string &,const std::string &); extern std::string crypto_scalarmult_base(const std::string &); extern std::string crypto_secretbox(const std::string &,const std::string &,const std::string &); extern std::string crypto_secretbox_open(const std::string &,const std::string &,const std::string &); extern std::string crypto_stream(size_t,const std::string &,const std::string &); extern std::string crypto_stream_xor(const std::string &,const std::string &,const std::string &); curvedns-curvedns-0.87/nacl/commandline/000077500000000000000000000000001150631715100203335ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/commandline/nacl-sha256.c000066400000000000000000000023261150631715100224250ustar00rootroot00000000000000/* commandline/nacl-sha256.c version 20080713 D. J. Bernstein Public domain. 
*/ #include #include #include #include #include #include #include #include "crypto_hash_sha256.h" unsigned char *input; unsigned long long inputalloc; unsigned long long inputlen; unsigned char h[crypto_hash_sha256_BYTES]; void h_print(void) { int i; for (i = 0;i < crypto_hash_sha256_BYTES;++i) printf("%02x",255 & (int) h[i]); printf("\n"); } int main() { struct stat st; int ch; if (fstat(0,&st) == 0) { input = mmap(0,st.st_size,PROT_READ,MAP_SHARED,0,0); if (input != MAP_FAILED) { crypto_hash_sha256(h,input,st.st_size); h_print(); return 0; } } input = 0; inputalloc = 0; inputlen = 0; while ((ch = getchar()) != EOF) { if (inputlen >= inputalloc) { void *newinput; while (inputlen >= inputalloc) inputalloc = inputalloc * 2 + 1; if (posix_memalign(&newinput,16,inputalloc) != 0) return 111; memcpy(newinput,input,inputlen); free(input); input = newinput; } input[inputlen++] = ch; } crypto_hash_sha256(h,input,inputlen); h_print(); return 0; } curvedns-curvedns-0.87/nacl/commandline/nacl-sha512.c000066400000000000000000000023261150631715100224200ustar00rootroot00000000000000/* commandline/nacl-sha512.c version 20080713 D. J. Bernstein Public domain. */ #include #include #include #include #include #include #include #include "crypto_hash_sha512.h" unsigned char *input; unsigned long long inputalloc; unsigned long long inputlen; unsigned char h[crypto_hash_sha512_BYTES]; void h_print(void) { int i; for (i = 0;i < crypto_hash_sha512_BYTES;++i) printf("%02x",255 & (int) h[i]); printf("\n"); } int main() { struct stat st; int ch; if (fstat(0,&st) == 0) { input = mmap(0,st.st_size,PROT_READ,MAP_SHARED,0,0); if (input != MAP_FAILED) { crypto_hash_sha512(h,input,st.st_size); h_print(); return 0; } } input = 0; inputalloc = 0; inputlen = 0; while ((ch = getchar()) != EOF) { if (inputlen >= inputalloc) { void *newinput; while (inputlen >= inputalloc) inputalloc = inputalloc * 2 + 1; if (posix_memalign(&newinput,16,inputalloc) != 0) return 111; memcpy(newinput,input,inputlen); free(input); input = newinput; } input[inputlen++] = ch; } crypto_hash_sha512(h,input,inputlen); h_print(); return 0; } curvedns-curvedns-0.87/nacl/cpucycles/000077500000000000000000000000001150631715100200375ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/cpucycles/alpha.c000066400000000000000000000027351150631715100212770ustar00rootroot00000000000000/* cpucycles/alpha.c version 20060316 D. J. Bernstein Public domain. */ #include #include #include static long long tod(void) { struct timeval t; gettimeofday(&t,(struct timezone *) 0); return t.tv_sec * (long long) 1000000 + t.tv_usec; } static long long rpcc(void) { unsigned long long t; asm volatile("rpcc %0" : "=r"(t)); return t & 0xffffffff; } static long long firstrpcc; static long long firsttod; static long long lastrpcc; static long long lasttod; static double mhz = 0; static void init(void) { firstrpcc = rpcc(); firsttod = tod(); do { lastrpcc = rpcc(); lasttod = tod(); } while (lasttod - firsttod < 10000); lastrpcc -= firstrpcc; lastrpcc &= 0xffffffff; lasttod -= firsttod; mhz = (double) lastrpcc / (double) lasttod; } long long cpucycles_alpha(void) { double x; long long y; if (!mhz) init(); lastrpcc = rpcc(); lasttod = tod(); lastrpcc -= firstrpcc; lastrpcc &= 0xffffffff; lasttod -= firsttod; /* Number of cycles since firstrpcc is lastrpcc + 2^32 y for unknown y. */ /* Number of microseconds since firsttod is lasttod. 
*/ x = (lasttod * mhz - lastrpcc) * 0.00000000023283064365386962890625; y = x; while (x > y + 0.5) y += 1; while (x < y - 0.5) y -= 1; y *= 4294967296ULL; lastrpcc += y; mhz = (double) lastrpcc / (double) lasttod; return firstrpcc + lastrpcc; } long long cpucycles_alpha_persecond(void) { if (!mhz) init(); return 1000000.0 * mhz; } curvedns-curvedns-0.87/nacl/cpucycles/alpha.h000066400000000000000000000007171150631715100213020ustar00rootroot00000000000000/* cpucycles alpha.h version 20060318 D. J. Bernstein Public domain. */ #ifndef CPUCYCLES_alpha_h #define CPUCYCLES_alpha_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_alpha(void); extern long long cpucycles_alpha_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "alpha" #define cpucycles cpucycles_alpha #define cpucycles_persecond cpucycles_alpha_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/amd64cpuinfo.c000066400000000000000000000005041150631715100225010ustar00rootroot00000000000000#include #include #include "osfreq.c" long long cpucycles_amd64cpuinfo(void) { unsigned long long result; asm volatile(".byte 15;.byte 49;shlq $32,%%rdx;orq %%rdx,%%rax" : "=a" (result) :: "%rdx"); return result; } long long cpucycles_amd64cpuinfo_persecond(void) { return osfreq(); } curvedns-curvedns-0.87/nacl/cpucycles/amd64cpuinfo.h000066400000000000000000000010071150631715100225050ustar00rootroot00000000000000/* cpucycles amd64cpuinfo.h version 20100803 D. J. Bernstein Public domain. */ #ifndef CPUCYCLES_amd64cpuinfo_h #define CPUCYCLES_amd64cpuinfo_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_amd64cpuinfo(void); extern long long cpucycles_amd64cpuinfo_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "amd64cpuinfo" #define cpucycles cpucycles_amd64cpuinfo #define cpucycles_persecond cpucycles_amd64cpuinfo_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/amd64cpuspeed.c000066400000000000000000000010501150631715100226430ustar00rootroot00000000000000#include #include #include #include long long cpucycles_amd64cpuspeed(void) { unsigned long long result; asm volatile(".byte 15;.byte 49;shlq $32,%%rdx;orq %%rdx,%%rax" : "=a" (result) :: "%rdx"); return result; } long long cpucycles_amd64cpuspeed_persecond(void) { int oid[2]; int val; size_t size; oid[0] = CTL_HW; oid[1] = HW_CPUSPEED; size = sizeof val; if (sysctl(oid,2,&val,&size,0,0) == -1) return 0; if (size != sizeof val) return 0; return val * 1000000LL; } curvedns-curvedns-0.87/nacl/cpucycles/amd64cpuspeed.h000066400000000000000000000010171150631715100226530ustar00rootroot00000000000000/* cpucycles amd64cpuspeed.h version 20090716 Matthew Dempsky Public domain. 
*/ #ifndef CPUCYCLES_amd64cpuspeed_h #define CPUCYCLES_amd64cpuspeed_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_amd64cpuspeed(void); extern long long cpucycles_amd64cpuspeed_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "amd64cpuspeed" #define cpucycles cpucycles_amd64cpuspeed #define cpucycles_persecond cpucycles_amd64cpuspeed_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/amd64tscfreq.c000066400000000000000000000006371150631715100225140ustar00rootroot00000000000000#include #include long long cpucycles_amd64tscfreq(void) { unsigned long long result; asm volatile(".byte 15;.byte 49;shlq $32,%%rdx;orq %%rdx,%%rax" : "=a" (result) :: "%rdx"); return result; } long long cpucycles_amd64tscfreq_persecond(void) { long result = 0; size_t resultlen = sizeof(long); sysctlbyname("machdep.tsc_freq",&result,&resultlen,0,0); return result; } curvedns-curvedns-0.87/nacl/cpucycles/amd64tscfreq.h000066400000000000000000000010071150631715100225110ustar00rootroot00000000000000/* cpucycles amd64tscfreq.h version 20060318 D. J. Bernstein Public domain. */ #ifndef CPUCYCLES_amd64tscfreq_h #define CPUCYCLES_amd64tscfreq_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_amd64tscfreq(void); extern long long cpucycles_amd64tscfreq_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "amd64tscfreq" #define cpucycles cpucycles_amd64tscfreq #define cpucycles_persecond cpucycles_amd64tscfreq_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/celllinux.c000066400000000000000000000032671150631715100222120ustar00rootroot00000000000000#include #include #include #include #include #include #include "osfreq.c" static long myround(double u) { long result = u; while (result + 0.5 < u) result += 1; while (result - 0.5 > u) result -= 1; return result; } static long long microseconds(void) { struct timeval t; gettimeofday(&t,(struct timezone *) 0); return t.tv_sec * (long long) 1000000 + t.tv_usec; } static long long timebase(void) { unsigned long long result; result = -spu_read_decrementer(); return 0xffffffff & result; } static double cpufrequency = 0; static long tbcycles = 0; static double guesstbcycles(void) { long long tb0; long long us0; long long tb1; long long us1; tb0 = timebase(); us0 = microseconds(); do { tb1 = timebase(); us1 = microseconds(); } while (us1 - us0 < 10000 || tb1 - tb0 < 1000); if (tb1 <= tb0) return 0; tb1 -= tb0; us1 -= us0; return (cpufrequency * 0.000001 * (double) us1) / (double) tb1; } static void init(void) { int loop; double guess1; double guess2; spu_write_decrementer(0xffffffff); cpufrequency = osfreq(); if (!cpufrequency) return; for (loop = 0;loop < 100;++loop) { guess1 = guesstbcycles(); guess2 = guesstbcycles(); tbcycles = myround(guess1); if (guess1 - tbcycles > 0.1) continue; if (tbcycles - guess1 > 0.1) continue; if (guess2 - tbcycles > 0.1) continue; if (tbcycles - guess2 > 0.1) continue; return; } tbcycles = 0; } long long cpucycles_celllinux(void) { if (!tbcycles) init(); return timebase() * tbcycles; } long long cpucycles_celllinux_persecond(void) { if (!tbcycles) init(); return cpufrequency; } curvedns-curvedns-0.87/nacl/cpucycles/celllinux.h000066400000000000000000000007571150631715100222200ustar00rootroot00000000000000/* cpucycles celllinux.h version 20081201 D. J. Bernstein Public domain. 
*/ #ifndef CPUCYCLES_celllinux_h #define CPUCYCLES_celllinux_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_celllinux(void); extern long long cpucycles_celllinux_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "celllinux" #define cpucycles cpucycles_celllinux #define cpucycles_persecond cpucycles_celllinux_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/dev4ns.c000066400000000000000000000033161150631715100214110ustar00rootroot00000000000000#include #include #include #include #include #include static int fddev = -1; static int prev[3]; static unsigned long long prevcycles = 0; static int now[3]; static long long cyclespersec = 0; static void readdev(unsigned int *result) { if (read(fddev,result,12) == 12) return; result[0] = result[1] = result[2] = 0; } long long cpucycles_dev4ns(void) { unsigned long long delta4; int deltan; int deltas; unsigned long long guesscycles; if (fddev == -1) { fddev = open("/dev/cpucycles4ns",O_RDONLY); readdev(prev); } readdev(now); delta4 = (unsigned int) (now[0] - prev[0]); /* unsigned change in number of cycles mod 2^32 */ deltan = now[1] - prev[1]; /* signed change in number of nanoseconds mod 10^9 */ deltas = now[2] - prev[2]; /* signed change in number of seconds */ if ((deltas == 0 && deltan < 200000000) || (deltas == 1 && deltan < -800000000)) return prevcycles + delta4; prev[0] = now[0]; prev[1] = now[1]; prev[2] = now[2]; if ((deltas == 0 && deltan < 300000000) || (deltas == 1 && deltan < -700000000)) { // actual number of cycles cannot have increased by 2^32 in <0.3ms cyclespersec = 1000000000 * (unsigned long long) delta4; cyclespersec /= deltan + 1000000000 * (long long) deltas; } else { guesscycles = deltas * cyclespersec; guesscycles += (deltan * cyclespersec) / 1000000000; while (delta4 + 2147483648ULL < guesscycles) delta4 += 4294967296ULL; /* XXX: could do longer-term extrapolation here */ } prevcycles += delta4; return prevcycles; } long long cpucycles_dev4ns_persecond(void) { while (!cyclespersec) cpucycles_dev4ns(); return cyclespersec; } curvedns-curvedns-0.87/nacl/cpucycles/dev4ns.h000066400000000000000000000007271150631715100214210ustar00rootroot00000000000000/* cpucycles dev4ns.h version 20100803 D. J. Bernstein Public domain. 
*/ #ifndef CPUCYCLES_dev4ns_h #define CPUCYCLES_dev4ns_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_dev4ns(void); extern long long cpucycles_dev4ns_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "dev4ns" #define cpucycles cpucycles_dev4ns #define cpucycles_persecond cpucycles_dev4ns_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/do000077500000000000000000000041611150631715100203710ustar00rootroot00000000000000#!/bin/sh -e okabi | ( while read abi do rm -f cpucycles.o cpucycles.h ( case "$abi" in ppc*) echo powerpccpuinfo echo powerpcmacos ;; amd64*) echo amd64tscfreq echo amd64cpuinfo echo amd64cpuspeed ;; x86*) echo x86tscfreq echo x86cpuinfo echo x86cpuspeed echo x86estimate ;; cell*) echo celllinux ;; sparc*) echo sparccpuinfo echo sparc32cpuinfo ;; mips*) echo mips ;; hppa*) echo hppapstat ;; alpha*) echo alpha ;; sgi*) echo sgi ;; arm*) echo dev4ns ;; esac echo amd64tscfreq echo amd64cpuinfo echo amd64cpuspeed echo x86tscfreq echo x86cpuinfo echo x86cpuspeed echo x86estimate echo ia64cpuinfo echo powerpccpuinfo echo powerpcmacos echo celllinux echo sparccpuinfo echo sparc32cpuinfo echo mips echo hppapstat echo alpha echo sgi echo dev4ns echo monotoniccpuinfo echo monotonic echo gettimeofday ) | ( while read n do okc-$abi | ( while read c do echo "=== `date` === Trying $n.c with $c..." >&2 rm -f test cpucycles-impl.o cpucycles-impl.h cpucycles-impl.c cp $n.c cpucycles-impl.c || continue cp $n.h cpucycles-impl.h || continue $c -c cpucycles-impl.c || continue $c -o test test.c cpucycles-impl.o || continue ./test || continue echo "=== `date` === Success. Using $n.c." >&2 mkdir -p lib/$abi mv cpucycles-impl.o lib/$abi/cpucycles.o mkdir -p include/$abi mv cpucycles-impl.h include/$abi/cpucycles.h exit 0 done exit 111 ) && exit 0 done exit 111 ) || ( echo ===== Giving up. >&2 rm -f test cpucycles-impl.o cpucycles-impl.h cpucycles-impl.c exit 111 ) || exit 0 done exit 0 ) || exit 111 curvedns-curvedns-0.87/nacl/cpucycles/gettimeofday.c000066400000000000000000000011121150631715100226570ustar00rootroot00000000000000#include #include #include #include #include #include "osfreq.c" static double cpufrequency = 0; static void init(void) { cpufrequency = osfreq(); } long long cpucycles_gettimeofday(void) { double result; struct timeval t; if (!cpufrequency) init(); gettimeofday(&t,(struct timezone *) 0); result = t.tv_usec; result *= 0.000001; result += (double) t.tv_sec; result *= cpufrequency; return result; } long long cpucycles_gettimeofday_persecond(void) { if (!cpufrequency) init(); return cpufrequency; } curvedns-curvedns-0.87/nacl/cpucycles/gettimeofday.h000066400000000000000000000010071150631715100226670ustar00rootroot00000000000000/* cpucycles gettimeofday.h version 20060318 D. J. Bernstein Public domain. 
*/ #ifndef CPUCYCLES_gettimeofday_h #define CPUCYCLES_gettimeofday_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_gettimeofday(void); extern long long cpucycles_gettimeofday_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "gettimeofday" #define cpucycles cpucycles_gettimeofday #define cpucycles_persecond cpucycles_gettimeofday_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/hppapstat.c000066400000000000000000000010421150631715100222040ustar00rootroot00000000000000#include #include #include #include #include #include long long cpucycles_hppapstat(void) { register long long result; _MFCTL(16,result); return result; } long long cpucycles_hppapstat_persecond(void) { struct pst_processor pst; union pstun pu; double result; pu.pst_processor = &pst; if (pstat(PSTAT_PROCESSOR,pu,sizeof(pst),1,0) < 0) return 0; result = pst.psp_iticksperclktick; result *= (double) sysconf(_SC_CLK_TCK); return result; } curvedns-curvedns-0.87/nacl/cpucycles/hppapstat.h000066400000000000000000000007571150631715100222250ustar00rootroot00000000000000/* cpucycles hppapstat.h version 20060319 D. J. Bernstein Public domain. */ #ifndef CPUCYCLES_hppapstat_h #define CPUCYCLES_hppapstat_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_hppapstat(void); extern long long cpucycles_hppapstat_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "hppapstat" #define cpucycles cpucycles_hppapstat #define cpucycles_persecond cpucycles_hppapstat_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/ia64cpuinfo.c000066400000000000000000000004061150631715100223320ustar00rootroot00000000000000#include #include #include "osfreq.c" long long cpucycles_ia64cpuinfo(void) { long long result; asm volatile("mov %0=ar.itc" : "=r"(result)); return result; } long long cpucycles_ia64cpuinfo_persecond(void) { return osfreq(); } curvedns-curvedns-0.87/nacl/cpucycles/ia64cpuinfo.h000066400000000000000000000007771150631715100223520ustar00rootroot00000000000000/* cpucycles ia64cpuinfo.h version 20100803 D. J. Bernstein Public domain. */ #ifndef CPUCYCLES_ia64cpuinfo_h #define CPUCYCLES_ia64cpuinfo_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_ia64cpuinfo(void); extern long long cpucycles_ia64cpuinfo_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "ia64cpuinfo" #define cpucycles cpucycles_ia64cpuinfo #define cpucycles_persecond cpucycles_ia64cpuinfo_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/mips.c000066400000000000000000000034221150631715100211540ustar00rootroot00000000000000/* cpucycles/mips.c version 20100803 D. J. Bernstein Public domain. 
*/ #define SCALE 2 #include #include #include static int prev[3]; static unsigned long long prevcycles = 0; static int now[3]; static long long cyclespersec = 0; static void readticks(unsigned int *result) { struct timeval t; unsigned int cc; asm volatile(".byte 59; .byte 16; .byte 2; .byte 124; move %0,$2" : "=r"(cc) : : "$2"); gettimeofday(&t,(struct timezone *) 0); result[0] = cc; result[1] = t.tv_usec; result[2] = t.tv_sec; } long long cpucycles_mips(void) { unsigned long long delta4; int deltan; int deltas; unsigned long long guesscycles; readticks(now); delta4 = (unsigned int) (now[0] - prev[0]); /* unsigned change in number of cycles mod 2^32 */ deltan = now[1] - prev[1]; /* signed change in number of nanoseconds mod 10^9 */ deltas = now[2] - prev[2]; /* signed change in number of seconds */ if ((deltas == 0 && deltan < 200000) || (deltas == 1 && deltan < -800000)) return (prevcycles + delta4) * SCALE; prev[0] = now[0]; prev[1] = now[1]; prev[2] = now[2]; if ((deltas == 0 && deltan < 300000) || (deltas == 1 && deltan < -700000)) { // actual number of cycles cannot have increased by 2^32 in <0.3ms cyclespersec = 1000000 * (unsigned long long) delta4; cyclespersec /= deltan + 1000000 * (long long) deltas; } else { guesscycles = deltas * cyclespersec; guesscycles += (deltan * cyclespersec) / 1000000; while (delta4 + 2147483648ULL < guesscycles) delta4 += 4294967296ULL; /* XXX: could do longer-term extrapolation here */ } prevcycles += delta4; return prevcycles * SCALE; } long long cpucycles_mips_persecond(void) { while (!cyclespersec) cpucycles_mips(); return cyclespersec * SCALE; } curvedns-curvedns-0.87/nacl/cpucycles/mips.h000066400000000000000000000007071150631715100211640ustar00rootroot00000000000000/* cpucycles mips.h version 20100802 D. J. Bernstein Public domain. */ #ifndef CPUCYCLES_mips_h #define CPUCYCLES_mips_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_mips(void); extern long long cpucycles_mips_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "mips" #define cpucycles cpucycles_mips #define cpucycles_persecond cpucycles_mips_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/monotonic.c000066400000000000000000000012641150631715100222130ustar00rootroot00000000000000#include #include #include #include #include #include static double cpufrequency = 0; static void init(void) { long result = 0; size_t resultlen = sizeof(long); sysctlbyname("machdep.tsc_freq",&result,&resultlen,0,0); cpufrequency = result; } long long cpucycles_monotonic(void) { double result; struct timespec t; if (!cpufrequency) init(); clock_gettime(CLOCK_MONOTONIC,&t); result = t.tv_nsec; result *= 0.000000001; result += (double) t.tv_sec; result *= cpufrequency; return result; } long long cpucycles_monotonic_persecond(void) { if (!cpufrequency) init(); return cpufrequency; } curvedns-curvedns-0.87/nacl/cpucycles/monotonic.h000066400000000000000000000007571150631715100222260ustar00rootroot00000000000000/* cpucycles monotonic.h version 20100803 D. J. Bernstein Public domain. 
*/ #ifndef CPUCYCLES_monotonic_h #define CPUCYCLES_monotonic_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_monotonic(void); extern long long cpucycles_monotonic_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "monotonic" #define cpucycles cpucycles_monotonic #define cpucycles_persecond cpucycles_monotonic_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/monotoniccpuinfo.c000066400000000000000000000011511150631715100235720ustar00rootroot00000000000000#include #include #include #include #include #include #include "osfreq.c" static double cpufrequency = 0; static void init(void) { cpufrequency = osfreq(); } long long cpucycles_monotoniccpuinfo(void) { double result; struct timespec t; if (!cpufrequency) init(); clock_gettime(CLOCK_MONOTONIC,&t); result = t.tv_nsec; result *= 0.000000001; result += (double) t.tv_sec; result *= cpufrequency; return result; } long long cpucycles_monotoniccpuinfo_persecond(void) { if (!cpufrequency) init(); return cpufrequency; } curvedns-curvedns-0.87/nacl/cpucycles/monotoniccpuinfo.h000066400000000000000000000010471150631715100236030ustar00rootroot00000000000000/* cpucycles monotoniccpuinfo.h version 20100804 D. J. Bernstein Public domain. */ #ifndef CPUCYCLES_monotoniccpuinfo_h #define CPUCYCLES_monotoniccpuinfo_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_monotoniccpuinfo(void); extern long long cpucycles_monotoniccpuinfo_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "monotoniccpuinfo" #define cpucycles cpucycles_monotoniccpuinfo #define cpucycles_persecond cpucycles_monotoniccpuinfo_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/osfreq.c000066400000000000000000000022621150631715100215040ustar00rootroot00000000000000static double osfreq(void) { FILE *f; double result; int s; f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq", "r"); if (f) { s = fscanf(f,"%lf",&result); fclose(f); if (s > 0) return 1000.0 * result; } f = fopen("/sys/devices/system/cpu/cpu0/clock_tick", "r"); if (f) { s = fscanf(f,"%lf",&result); fclose(f); if (s > 0) return result; } f = fopen("/proc/cpuinfo","r"); if (f) { for (;;) { s = fscanf(f,"cpu MHz : %lf",&result); if (s > 0) break; if (s == 0) s = fscanf(f,"%*[^\n]\n"); if (s < 0) { result = 0; break; } } fclose(f); if (result) return 1000000.0 * result; } f = popen("/usr/sbin/lsattr -E -l proc0 -a frequency 2>/dev/null","r"); if (f) { s = fscanf(f,"frequency %lf",&result); pclose(f); if (s > 0) return result; } f = popen("/usr/sbin/psrinfo -v 2>/dev/null","r"); if (f) { for (;;) { s = fscanf(f," The %*s processor operates at %lf MHz",&result); if (s > 0) break; if (s == 0) s = fscanf(f,"%*[^\n]\n"); if (s < 0) { result = 0; break; } } pclose(f); if (result) return 1000000.0 * result; } return 0; } curvedns-curvedns-0.87/nacl/cpucycles/powerpccpuinfo.c000066400000000000000000000036521150631715100232540ustar00rootroot00000000000000#include #include #include #include #include #include "osfreq.c" static long myround(double u) { long result = u; while (result + 0.5 < u) result += 1; while (result - 0.5 > u) result -= 1; return result; } static long long microseconds(void) { struct timeval t; gettimeofday(&t,(struct timezone *) 0); return t.tv_sec * (long long) 1000000 + t.tv_usec; } static int tbshift = 0; static long long timebase(void) { unsigned long high; unsigned long low; unsigned long newhigh; unsigned long long result; 
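/* Read the 64-bit PowerPC time base without tearing: fetch the upper half
 * (mftbu), then the lower half (mftb), then the upper half again; if the two
 * upper reads differ, a carry from the lower half occurred in between, so the
 * branch back to label 7 retries the sequence. */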
asm volatile( "7:mftbu %0;mftb %1;mftbu %2;cmpw %0,%2;bne 7b" : "=r" (high), "=r" (low), "=r" (newhigh) ); result = high; result <<= 32; result |= low; return result >> tbshift; } static double cpufrequency = 0; static long tbcycles = 0; static double guesstbcycles(void) { long long tb0; long long us0; long long tb1; long long us1; tb0 = timebase(); us0 = microseconds(); do { tb1 = timebase(); us1 = microseconds(); } while (us1 - us0 < 10000 || tb1 - tb0 < 1000); if (tb1 <= tb0) return 0; tb1 -= tb0; us1 -= us0; return (cpufrequency * 0.000001 * (double) us1) / (double) tb1; } static void init(void) { int loop; double guess1; double guess2; cpufrequency = osfreq(); if (!cpufrequency) return; for (tbshift = 0;tbshift < 10;++tbshift) { for (loop = 0;loop < 100;++loop) { guess1 = guesstbcycles(); guess2 = guesstbcycles(); tbcycles = myround(guess1); if (guess1 - tbcycles > 0.1) continue; if (tbcycles - guess1 > 0.1) continue; if (guess2 - tbcycles > 0.1) continue; if (tbcycles - guess2 > 0.1) continue; return; } } tbcycles = 0; } long long cpucycles_powerpccpuinfo(void) { if (!tbcycles) init(); return timebase() * tbcycles; } long long cpucycles_powerpccpuinfo_persecond(void) { if (!tbcycles) init(); return cpufrequency; } curvedns-curvedns-0.87/nacl/cpucycles/powerpccpuinfo.h000066400000000000000000000010271150631715100232530ustar00rootroot00000000000000/* cpucycles powerpccpuinfo.h version 20100803 D. J. Bernstein Public domain. */ #ifndef CPUCYCLES_powerpccpuinfo_h #define CPUCYCLES_powerpccpuinfo_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_powerpccpuinfo(void); extern long long cpucycles_powerpccpuinfo_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "powerpccpuinfo" #define cpucycles cpucycles_powerpccpuinfo #define cpucycles_persecond cpucycles_powerpccpuinfo_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/powerpcmacos.c000066400000000000000000000021641150631715100227100ustar00rootroot00000000000000#include #include #include #define timebase mach_absolute_time static int cpumib[2] = { CTL_HW, HW_CPU_FREQ } ; static int tbmib[2] = { CTL_HW, HW_TB_FREQ } ; static long myround(double u) { long result = u; while (result + 0.5 < u) result += 1; while (result - 0.5 > u) result -= 1; return result; } static long tbcycles = 0; static void init(void) { unsigned int cpufrequency = 0; size_t cpufrequencylen = sizeof(unsigned int); unsigned int tbfrequency = 0; size_t tbfrequencylen = sizeof(unsigned int); sysctl(cpumib,2,&cpufrequency,&cpufrequencylen,0,0); sysctl(tbmib,2,&tbfrequency,&tbfrequencylen,0,0); if (tbfrequency > 0) tbcycles = myround((double) (unsigned long long) cpufrequency / (double) (unsigned long long) tbfrequency); } long long cpucycles_powerpcmacos(void) { if (!tbcycles) init(); return timebase() * tbcycles; } long long cpucycles_powerpcmacos_persecond(void) { unsigned int result = 0; size_t resultlen = sizeof(unsigned int); sysctl(cpumib,2,&result,&resultlen,0,0); return (unsigned long long) result; } curvedns-curvedns-0.87/nacl/cpucycles/powerpcmacos.h000066400000000000000000000010071150631715100227100ustar00rootroot00000000000000/* cpucycles powerpcmacos.h version 20060319 D. J. Bernstein Public domain. 
*/ #ifndef CPUCYCLES_powerpcmacos_h #define CPUCYCLES_powerpcmacos_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_powerpcmacos(void); extern long long cpucycles_powerpcmacos_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "powerpcmacos" #define cpucycles cpucycles_powerpcmacos #define cpucycles_persecond cpucycles_powerpcmacos_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/sgi.c000066400000000000000000000013651150631715100207720ustar00rootroot00000000000000#include #include #include #include #include #include static double cpufrequency = 0; static void init(void) { FILE *f; f = popen("hinv -c processor | awk '{if ($3==\"MHZ\") print $2*1000000}'","r"); if (!f) return; if (fscanf(f,"%lf",&cpufrequency) < 1) cpufrequency = 0; pclose(f); if (!cpufrequency) return; } long long cpucycles_sgi(void) { double result; struct timespec t; if (!cpufrequency) init(); clock_gettime(CLOCK_SGI_CYCLE,&t); result = t.tv_nsec; result *= 0.000000001; result += (double) t.tv_sec; result *= cpufrequency; return result; } long long cpucycles_sgi_persecond(void) { if (!cpufrequency) init(); return cpufrequency; } curvedns-curvedns-0.87/nacl/cpucycles/sgi.h000066400000000000000000000006771150631715100210040ustar00rootroot00000000000000/* cpucycles sgi.h version 20070916 D. J. Bernstein Public domain. */ #ifndef CPUCYCLES_sgi_h #define CPUCYCLES_sgi_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_sgi(void); extern long long cpucycles_sgi_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "sgi" #define cpucycles cpucycles_sgi #define cpucycles_persecond cpucycles_sgi_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/sparc32cpuinfo.c000066400000000000000000000005221150631715100230430ustar00rootroot00000000000000#include #include #include "osfreq.c" long long cpucycles_sparc32cpuinfo(void) { long long result; asm volatile(".word 2202075136; .word 2570088480; srl %%g1,0,%L0; mov %%o4,%H0" : "=r" (result) : : "g1","o4"); return result; } long long cpucycles_sparc32cpuinfo_persecond(void) { return osfreq(); } curvedns-curvedns-0.87/nacl/cpucycles/sparc32cpuinfo.h000066400000000000000000000010271150631715100230510ustar00rootroot00000000000000/* cpucycles sparc32cpuinfo.h version 20100804 D. J. Bernstein Public domain. */ #ifndef CPUCYCLES_sparc32cpuinfo_h #define CPUCYCLES_sparc32cpuinfo_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_sparc32cpuinfo(void); extern long long cpucycles_sparc32cpuinfo_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "sparc32cpuinfo" #define cpucycles cpucycles_sparc32cpuinfo #define cpucycles_persecond cpucycles_sparc32cpuinfo_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/sparccpuinfo.c000066400000000000000000000004101150631715100226720ustar00rootroot00000000000000#include #include #include "osfreq.c" long long cpucycles_sparccpuinfo(void) { long long result; asm volatile("rd %%tick,%0" : "=r" (result)); return result; } long long cpucycles_sparccpuinfo_persecond(void) { return osfreq(); } curvedns-curvedns-0.87/nacl/cpucycles/sparccpuinfo.h000066400000000000000000000010071150631715100227020ustar00rootroot00000000000000/* cpucycles sparccpuinfo.h version 20100803 D. J. Bernstein Public domain. 
*/ #ifndef CPUCYCLES_sparccpuinfo_h #define CPUCYCLES_sparccpuinfo_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_sparccpuinfo(void); extern long long cpucycles_sparccpuinfo_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "sparccpuinfo" #define cpucycles cpucycles_sparccpuinfo #define cpucycles_persecond cpucycles_sparccpuinfo_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/test.c000066400000000000000000000035631150631715100211710ustar00rootroot00000000000000#include #include #include #include #include "cpucycles-impl.h" static long long tod(void) { struct timeval t; gettimeofday(&t,(struct timezone *) 0); return t.tv_sec * (long long) 1000000 + t.tv_usec; } long long todstart; long long todend; long long cpustart; long long cpuend; long long cyclespersecond; long long cyclespertod; long long t[1001]; int main() { int j; int i; if (!cpucycles()) { fprintf(stderr,"cpucycles() = %lld\n",cpucycles()); return 100; } for (i = 0;i <= 1000;++i) t[i] = cpucycles(); for (i = 0;i < 1000;++i) if (t[i] > t[i + 1]) { fprintf(stderr,"t[%d] = %lld\n",i,t[i]); fprintf(stderr,"t[%d] = %lld\n",i + 1,t[i + 1]); fprintf(stderr,"cpucycles_persecond() = %lld\n",cpucycles_persecond()); return 100; } if (t[0] == t[1000]) { fprintf(stderr,"t[%d] = %lld\n",0,t[0]); fprintf(stderr,"t[%d] = %lld\n",1000,t[1000]); fprintf(stderr,"cpucycles_persecond() = %lld\n",cpucycles_persecond()); return 100; } cyclespersecond = cpucycles_persecond(); if (cyclespersecond <= 0) { fprintf(stderr,"cpucycles_persecond() = %lld\n",cyclespersecond); return 100; } todstart = tod(); cpustart = cpucycles(); for (j = 0;j < 1000;++j) for (i = 0;i <= 1000;++i) t[i] = t[i] + i + j; todend = tod(); cpuend = cpucycles(); todend -= todstart; cpuend -= cpustart; cyclespertod = (long long) (((double) cpuend) * 1000000.0 / (double) todend); if (cyclespertod > 10 * cyclespersecond) { fprintf(stderr,"cyclespertod = %lld, cyclespersecond = %lld\n",cyclespertod,cyclespersecond); return 100; } for (i = 0;i <= 1000;++i) t[i] = cpucycles(); printf("%s",cpucycles_implementation); printf(" %lld",cyclespersecond); printf(" %lld",cyclespertod); for (i = 0;i < 64;++i) printf(" %lld",t[i + 1] - t[i]); printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/cpucycles/x86cpuinfo.c000066400000000000000000000004111150631715100222100ustar00rootroot00000000000000#include #include #include "osfreq.c" long long cpucycles_x86cpuinfo(void) { long long result; asm volatile(".byte 15;.byte 49" : "=A" (result)); return result; } long long cpucycles_x86cpuinfo_persecond(void) { return osfreq(); } curvedns-curvedns-0.87/nacl/cpucycles/x86cpuinfo.h000066400000000000000000000007671150631715100222330ustar00rootroot00000000000000/* cpucycles x86cpuinfo.h version 20100803 D. J. Bernstein Public domain. 
*/ #ifndef CPUCYCLES_x86cpuinfo_h #define CPUCYCLES_x86cpuinfo_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_x86cpuinfo(void); extern long long cpucycles_x86cpuinfo_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "x86cpuinfo" #define cpucycles cpucycles_x86cpuinfo #define cpucycles_persecond cpucycles_x86cpuinfo_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/x86cpuspeed.c000066400000000000000000000007551150631715100223700ustar00rootroot00000000000000#include #include #include #include long long cpucycles_x86cpuspeed(void) { long long result; asm volatile(".byte 15;.byte 49" : "=A" (result)); return result; } long long cpucycles_x86cpuspeed_persecond(void) { int oid[2]; int val; size_t size; oid[0] = CTL_HW; oid[1] = HW_CPUSPEED; size = sizeof val; if (sysctl(oid,2,&val,&size,0,0) == -1) return 0; if (size != sizeof val) return 0; return val * 1000000LL; } curvedns-curvedns-0.87/nacl/cpucycles/x86cpuspeed.h000066400000000000000000000007771150631715100224010ustar00rootroot00000000000000/* cpucycles x86cpuspeed.h version 20090716 Matthew Dempsky Public domain. */ #ifndef CPUCYCLES_x86cpuspeed_h #define CPUCYCLES_x86cpuspeed_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_x86cpuspeed(void); extern long long cpucycles_x86cpuspeed_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "x86cpuspeed" #define cpucycles cpucycles_x86cpuspeed #define cpucycles_persecond cpucycles_x86cpuspeed_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/x86estimate.c000066400000000000000000000022661150631715100223720ustar00rootroot00000000000000#include #include #include #include long long cpucycles_x86estimate(void) { long long result; asm volatile(".byte 15;.byte 49" : "=A" (result)); return result; } static long long microseconds(void) { struct timeval t; gettimeofday(&t,(struct timezone *) 0); return t.tv_sec * (long long) 1000000 + t.tv_usec; } static double guessfreq(void) { long long tb0; long long us0; long long tb1; long long us1; tb0 = cpucycles_x86estimate(); us0 = microseconds(); do { tb1 = cpucycles_x86estimate(); us1 = microseconds(); } while (us1 - us0 < 10000 || tb1 - tb0 < 1000); if (tb1 <= tb0) return 0; tb1 -= tb0; us1 -= us0; return ((double) tb1) / (0.000001 * (double) us1); } static double cpufrequency = 0; static void init(void) { double guess1; double guess2; int loop; for (loop = 0;loop < 100;++loop) { guess1 = guessfreq(); guess2 = guessfreq(); if (guess1 > 1.01 * guess2) continue; if (guess2 > 1.01 * guess1) continue; cpufrequency = 0.5 * (guess1 + guess2); break; } } long long cpucycles_x86estimate_persecond(void) { if (!cpufrequency) init(); return cpufrequency; } curvedns-curvedns-0.87/nacl/cpucycles/x86estimate.h000066400000000000000000000007771150631715100224040ustar00rootroot00000000000000/* cpucycles x86estimate.h version 20070121 D. J. Bernstein Public domain. 
*/ #ifndef CPUCYCLES_x86estimate_h #define CPUCYCLES_x86estimate_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_x86estimate(void); extern long long cpucycles_x86estimate_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "x86estimate" #define cpucycles cpucycles_x86estimate #define cpucycles_persecond cpucycles_x86estimate_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpucycles/x86tscfreq.c000066400000000000000000000005511150631715100222210ustar00rootroot00000000000000#include #include long long cpucycles_x86tscfreq(void) { long long result; asm volatile(".byte 15;.byte 49" : "=A" (result)); return result; } long long cpucycles_x86tscfreq_persecond(void) { long result = 0; size_t resultlen = sizeof(long); sysctlbyname("machdep.tsc_freq",&result,&resultlen,0,0); return result; } curvedns-curvedns-0.87/nacl/cpucycles/x86tscfreq.h000066400000000000000000000007671150631715100222370ustar00rootroot00000000000000/* cpucycles x86tscfreq.h version 20060318 D. J. Bernstein Public domain. */ #ifndef CPUCYCLES_x86tscfreq_h #define CPUCYCLES_x86tscfreq_h #ifdef __cplusplus extern "C" { #endif extern long long cpucycles_x86tscfreq(void); extern long long cpucycles_x86tscfreq_persecond(void); #ifdef __cplusplus } #endif #ifndef cpucycles_implementation #define cpucycles_implementation "x86tscfreq" #define cpucycles cpucycles_x86tscfreq #define cpucycles_persecond cpucycles_x86tscfreq_persecond #endif #endif curvedns-curvedns-0.87/nacl/cpuid/000077500000000000000000000000001150631715100171515ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/cpuid/cbytes.c000066400000000000000000000003531150631715100206070ustar00rootroot00000000000000#include int main() { char ch; int loop = 0; while (scanf("%c",&ch) == 1) { printf("0x%02x,",255 & (int) ch); if (++loop == 16) { loop = 0; printf("\n"); } } printf("0x00\n"); return 0; } curvedns-curvedns-0.87/nacl/cpuid/cpuid.c000066400000000000000000000012601150631715100204200ustar00rootroot00000000000000#include #include #include #include void nope() { exit(1); } int main() { unsigned long x[4]; unsigned long y[4]; int i; int j; char c; signal(SIGILL,nope); x[0] = 0; x[1] = 0; x[2] = 0; x[3] = 0; asm volatile(".byte 15;.byte 162" : "=a"(x[0]),"=b"(x[1]),"=c"(x[3]),"=d"(x[2]) : "0"(0) ); if (!x[0]) return 0; asm volatile(".byte 15;.byte 162" : "=a"(y[0]),"=b"(y[1]),"=c"(y[2]),"=d"(y[3]) : "0"(1) ); for (i = 1;i < 4;++i) for (j = 0;j < 4;++j) { c = x[i] >> (8 * j); if (c < 32) c = 32; if (c > 126) c = 126; putchar(c); } printf("-%08x-%08x\n",y[0],y[3]); return 0; } curvedns-curvedns-0.87/nacl/cpuid/do000077500000000000000000000014151150631715100175020ustar00rootroot00000000000000#!/bin/sh -e mkdir include ( echo x86 echo unknown ) | ( while read n do okabi | ( while read abi do okc-$abi | ( while read c do echo "=== `date` === Trying $n.c with $c..." 
>&2 rm -f cpuid.c cp $n.c cpuid.c || continue $c -o cpuid cpuid.c || continue $c -o cbytes cbytes.c || continue ./cpuid > cpuid.out || continue echo 'static const char cpuid[] = {' > cpuid.h || continue ./cbytes < cpuid.out >> cpuid.h || continue echo '} ;' >> cpuid.h || continue cp cpuid.h include/cpuid.h || continue cat cpuid.out exit 0 done exit 111 ) && exit 0 done exit 111 ) && exit 0 done exit 111 ) curvedns-curvedns-0.87/nacl/cpuid/unknown.c000066400000000000000000000001111150631715100210050ustar00rootroot00000000000000#include main() { printf("unknown CPU ID\n"); return 0; } curvedns-curvedns-0.87/nacl/cpuid/x86.c000066400000000000000000000013161150631715100177430ustar00rootroot00000000000000#include #include #include #include void nope() { exit(1); } int main() { unsigned long x[4]; unsigned long y[4]; int i; int j; char c; signal(SIGILL,nope); x[0] = 0; x[1] = 0; x[2] = 0; x[3] = 0; asm volatile(".byte 15;.byte 162" : "=a"(x[0]),"=b"(x[1]),"=c"(x[3]),"=d"(x[2]) : "0"(0) ); if (!x[0]) return 0; asm volatile(".byte 15;.byte 162" : "=a"(y[0]),"=b"(y[1]),"=c"(y[2]),"=d"(y[3]) : "0"(1) ); for (i = 1;i < 4;++i) for (j = 0;j < 4;++j) { c = x[i] >> (8 * j); if (c < 32) c = 32; if (c > 126) c = 126; putchar(c); } printf("-%08x-%08x\n",(unsigned int) y[0],(unsigned int) y[3]); return 0; } curvedns-curvedns-0.87/nacl/crypto_auth/000077500000000000000000000000001150631715100204065ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha256/000077500000000000000000000000001150631715100222475ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha256/checksum000066400000000000000000000001011150631715100237640ustar00rootroot000000000000003bd7abd4f4dce04396f2ac7cb1cff70607f692411c49a1563b037d31e1662632 curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha256/ref/000077500000000000000000000000001150631715100230235ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha256/ref/api.h000066400000000000000000000000631150631715100237440ustar00rootroot00000000000000#define CRYPTO_BYTES 32 #define CRYPTO_KEYBYTES 32 curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha256/ref/hmac.c000066400000000000000000000035521150631715100241040ustar00rootroot00000000000000/* * 20080913 * D. J. Bernstein * Public domain. 
* */ #include "crypto_hashblocks_sha256.h" #include "crypto_auth.h" #define blocks crypto_hashblocks_sha256 typedef unsigned int uint32; static const char iv[32] = { 0x6a,0x09,0xe6,0x67, 0xbb,0x67,0xae,0x85, 0x3c,0x6e,0xf3,0x72, 0xa5,0x4f,0xf5,0x3a, 0x51,0x0e,0x52,0x7f, 0x9b,0x05,0x68,0x8c, 0x1f,0x83,0xd9,0xab, 0x5b,0xe0,0xcd,0x19, } ; int crypto_auth(unsigned char *out,const unsigned char *in,unsigned long long inlen,const unsigned char *k) { unsigned char h[32]; unsigned char padded[128]; int i; unsigned long long bits = 512 + (inlen << 3); for (i = 0;i < 32;++i) h[i] = iv[i]; for (i = 0;i < 32;++i) padded[i] = k[i] ^ 0x36; for (i = 32;i < 64;++i) padded[i] = 0x36; blocks(h,padded,64); blocks(h,in,inlen); in += inlen; inlen &= 63; in -= inlen; for (i = 0;i < inlen;++i) padded[i] = in[i]; padded[inlen] = 0x80; if (inlen < 56) { for (i = inlen + 1;i < 56;++i) padded[i] = 0; padded[56] = bits >> 56; padded[57] = bits >> 48; padded[58] = bits >> 40; padded[59] = bits >> 32; padded[60] = bits >> 24; padded[61] = bits >> 16; padded[62] = bits >> 8; padded[63] = bits; blocks(h,padded,64); } else { for (i = inlen + 1;i < 120;++i) padded[i] = 0; padded[120] = bits >> 56; padded[121] = bits >> 48; padded[122] = bits >> 40; padded[123] = bits >> 32; padded[124] = bits >> 24; padded[125] = bits >> 16; padded[126] = bits >> 8; padded[127] = bits; blocks(h,padded,128); } for (i = 0;i < 32;++i) padded[i] = k[i] ^ 0x5c; for (i = 32;i < 64;++i) padded[i] = 0x5c; for (i = 0;i < 32;++i) padded[64 + i] = h[i]; for (i = 0;i < 32;++i) out[i] = iv[i]; for (i = 32;i < 64;++i) padded[64 + i] = 0; padded[64 + 32] = 0x80; padded[64 + 62] = 3; blocks(out,padded,128); return 0; } curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha256/ref/verify.c000066400000000000000000000004311150631715100244710ustar00rootroot00000000000000#include "crypto_verify_32.h" #include "crypto_auth.h" int crypto_auth_verify(const unsigned char *h,const unsigned char *in,unsigned long long inlen,const unsigned char *k) { unsigned char correct[32]; crypto_auth(correct,in,inlen,k); return crypto_verify_32(h,correct); } curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha256/used000066400000000000000000000000001150631715100231200ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha512256/000077500000000000000000000000001150631715100224775ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha512256/checksum000066400000000000000000000001011150631715100242140ustar00rootroot000000000000002f5e8a6a0cac012d8d001351d7d583e69f91390df46305c3608e0c2893491886 curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha512256/ref/000077500000000000000000000000001150631715100232535ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha512256/ref/api.h000066400000000000000000000000631150631715100241740ustar00rootroot00000000000000#define CRYPTO_BYTES 32 #define CRYPTO_KEYBYTES 32 curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha512256/ref/hmac.c000066400000000000000000000042431150631715100243320ustar00rootroot00000000000000/* * 20080913 * D. J. Bernstein * Public domain. 
* */ #include "crypto_hashblocks_sha512.h" #include "crypto_auth.h" #define blocks crypto_hashblocks_sha512 typedef unsigned long long uint64; static const unsigned char iv[64] = { 0x6a,0x09,0xe6,0x67,0xf3,0xbc,0xc9,0x08, 0xbb,0x67,0xae,0x85,0x84,0xca,0xa7,0x3b, 0x3c,0x6e,0xf3,0x72,0xfe,0x94,0xf8,0x2b, 0xa5,0x4f,0xf5,0x3a,0x5f,0x1d,0x36,0xf1, 0x51,0x0e,0x52,0x7f,0xad,0xe6,0x82,0xd1, 0x9b,0x05,0x68,0x8c,0x2b,0x3e,0x6c,0x1f, 0x1f,0x83,0xd9,0xab,0xfb,0x41,0xbd,0x6b, 0x5b,0xe0,0xcd,0x19,0x13,0x7e,0x21,0x79 } ; int crypto_auth(unsigned char *out,const unsigned char *in,unsigned long long inlen,const unsigned char *k) { unsigned char h[64]; unsigned char padded[256]; int i; unsigned long long bytes = 128 + inlen; for (i = 0;i < 64;++i) h[i] = iv[i]; for (i = 0;i < 32;++i) padded[i] = k[i] ^ 0x36; for (i = 32;i < 128;++i) padded[i] = 0x36; blocks(h,padded,128); blocks(h,in,inlen); in += inlen; inlen &= 127; in -= inlen; for (i = 0;i < inlen;++i) padded[i] = in[i]; padded[inlen] = 0x80; if (inlen < 112) { for (i = inlen + 1;i < 119;++i) padded[i] = 0; padded[119] = bytes >> 61; padded[120] = bytes >> 53; padded[121] = bytes >> 45; padded[122] = bytes >> 37; padded[123] = bytes >> 29; padded[124] = bytes >> 21; padded[125] = bytes >> 13; padded[126] = bytes >> 5; padded[127] = bytes << 3; blocks(h,padded,128); } else { for (i = inlen + 1;i < 247;++i) padded[i] = 0; padded[247] = bytes >> 61; padded[248] = bytes >> 53; padded[249] = bytes >> 45; padded[250] = bytes >> 37; padded[251] = bytes >> 29; padded[252] = bytes >> 21; padded[253] = bytes >> 13; padded[254] = bytes >> 5; padded[255] = bytes << 3; blocks(h,padded,256); } for (i = 0;i < 32;++i) padded[i] = k[i] ^ 0x5c; for (i = 32;i < 128;++i) padded[i] = 0x5c; for (i = 0;i < 64;++i) padded[128 + i] = h[i]; for (i = 0;i < 64;++i) h[i] = iv[i]; for (i = 64;i < 128;++i) padded[128 + i] = 0; padded[128 + 64] = 0x80; padded[128 + 126] = 6; blocks(h,padded,256); for (i = 0;i < 32;++i) out[i] = h[i]; return 0; } curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha512256/ref/verify.c000066400000000000000000000004311150631715100247210ustar00rootroot00000000000000#include "crypto_verify_32.h" #include "crypto_auth.h" int crypto_auth_verify(const unsigned char *h,const unsigned char *in,unsigned long long inlen,const unsigned char *k) { unsigned char correct[32]; crypto_auth(correct,in,inlen,k); return crypto_verify_32(h,correct); } curvedns-curvedns-0.87/nacl/crypto_auth/hmacsha512256/used000066400000000000000000000000001150631715100233500ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_auth/measure.c000066400000000000000000000034151150631715100222160ustar00rootroot00000000000000#include "crypto_auth.h" #include "randombytes.h" #include "cpucycles.h" extern void printentry(long long,const char *,long long *,long long); extern unsigned char *alignedcalloc(unsigned long long); extern const char *primitiveimplementation; extern const char *implementationversion; extern const char *sizenames[]; extern const long long sizes[]; extern void allocate(void); extern void measure(void); const char *primitiveimplementation = crypto_auth_IMPLEMENTATION; const char *implementationversion = crypto_auth_VERSION; const char *sizenames[] = { "outputbytes", "keybytes", 0 }; const long long sizes[] = { crypto_auth_BYTES, crypto_auth_KEYBYTES }; #define MAXTEST_BYTES 4096 #ifdef SUPERCOP #define MGAP 8192 #else #define MGAP 8 #endif static unsigned char *k; static unsigned char *m; static unsigned char *h; void preallocate(void) { } void allocate(void) { k = 
alignedcalloc(crypto_auth_KEYBYTES); m = alignedcalloc(MAXTEST_BYTES); h = alignedcalloc(crypto_auth_BYTES); } #define TIMINGS 15 static long long cycles[TIMINGS + 1]; void measure(void) { int i; int loop; int mlen; for (loop = 0;loop < LOOPS;++loop) { for (mlen = 0;mlen <= MAXTEST_BYTES;mlen += 1 + mlen / MGAP) { randombytes(k,crypto_auth_KEYBYTES); randombytes(m,mlen); randombytes(h,crypto_auth_BYTES); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_auth(h,m,mlen,k); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_auth_verify(h,m,mlen,k); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"verify_cycles",cycles,TIMINGS); } } } curvedns-curvedns-0.87/nacl/crypto_auth/try.c000066400000000000000000000100021150631715100213610ustar00rootroot00000000000000/* * crypto_auth/try.c version 20090118 * D. J. Bernstein * Public domain. */ #include "crypto_hash_sha256.h" #include "crypto_auth.h" extern unsigned char *alignedcalloc(unsigned long long); const char *primitiveimplementation = crypto_auth_IMPLEMENTATION; #define MAXTEST_BYTES 10000 #define CHECKSUM_BYTES 4096 #define TUNE_BYTES 1536 static unsigned char *h; static unsigned char *m; static unsigned char *k; static unsigned char *h2; static unsigned char *m2; static unsigned char *k2; void preallocate(void) { } void allocate(void) { h = alignedcalloc(crypto_auth_BYTES); m = alignedcalloc(MAXTEST_BYTES); k = alignedcalloc(crypto_auth_KEYBYTES); h2 = alignedcalloc(crypto_auth_BYTES); m2 = alignedcalloc(MAXTEST_BYTES + crypto_auth_BYTES); k2 = alignedcalloc(crypto_auth_KEYBYTES + crypto_auth_BYTES); } void predoit(void) { } void doit(void) { crypto_auth(h,m,TUNE_BYTES,k); crypto_auth_verify(h,m,TUNE_BYTES,k); } char checksum[crypto_auth_BYTES * 2 + 1]; const char *checksum_compute(void) { long long i; long long j; for (i = 0;i < CHECKSUM_BYTES;++i) { long long mlen = i; long long klen = crypto_auth_KEYBYTES; long long hlen = crypto_auth_BYTES; for (j = -16;j < 0;++j) h[j] = random(); for (j = -16;j < 0;++j) k[j] = random(); for (j = -16;j < 0;++j) m[j] = random(); for (j = hlen;j < hlen + 16;++j) h[j] = random(); for (j = klen;j < klen + 16;++j) k[j] = random(); for (j = mlen;j < mlen + 16;++j) m[j] = random(); for (j = -16;j < hlen + 16;++j) h2[j] = h[j]; for (j = -16;j < klen + 16;++j) k2[j] = k[j]; for (j = -16;j < mlen + 16;++j) m2[j] = m[j]; if (crypto_auth(h,m,mlen,k) != 0) return "crypto_auth returns nonzero"; for (j = -16;j < klen + 16;++j) if (k[j] != k2[j]) return "crypto_auth overwrites k"; for (j = -16;j < mlen + 16;++j) if (m[j] != m2[j]) return "crypto_auth overwrites m"; for (j = -16;j < 0;++j) if (h[j] != h2[j]) return "crypto_auth writes before output"; for (j = hlen;j < hlen + 16;++j) if (h[j] != h2[j]) return "crypto_auth writes after output"; for (j = -16;j < 0;++j) h[j] = random(); for (j = -16;j < 0;++j) k[j] = random(); for (j = -16;j < 0;++j) m[j] = random(); for (j = hlen;j < hlen + 16;++j) h[j] = random(); for (j = klen;j < klen + 16;++j) k[j] = random(); for (j = mlen;j < mlen + 16;++j) m[j] = random(); for (j = -16;j < hlen + 16;++j) h2[j] = h[j]; for (j = -16;j < klen + 16;++j) k2[j] = k[j]; for (j = -16;j < mlen + 16;++j) m2[j] = m[j]; if (crypto_auth(m2,m2,mlen,k) != 0) return "crypto_auth returns nonzero"; for (j = 0;j < hlen;++j) if (m2[j] != h[j]) return "crypto_auth does not handle m overlap"; for (j = 0;j < hlen;++j) m2[j] = m[j]; if 
(crypto_auth(k2,m2,mlen,k2) != 0) return "crypto_auth returns nonzero"; for (j = 0;j < hlen;++j) if (k2[j] != h[j]) return "crypto_auth does not handle k overlap"; for (j = 0;j < hlen;++j) k2[j] = k[j]; if (crypto_auth_verify(h,m,mlen,k) != 0) return "crypto_auth_verify returns nonzero"; for (j = -16;j < hlen + 16;++j) if (h[j] != h2[j]) return "crypto_auth overwrites h"; for (j = -16;j < klen + 16;++j) if (k[j] != k2[j]) return "crypto_auth overwrites k"; for (j = -16;j < mlen + 16;++j) if (m[j] != m2[j]) return "crypto_auth overwrites m"; crypto_hash_sha256(h2,h,hlen); for (j = 0;j < klen;++j) k[j] ^= h2[j % 32]; if (crypto_auth(h,m,mlen,k) != 0) return "crypto_auth returns nonzero"; if (crypto_auth_verify(h,m,mlen,k) != 0) return "crypto_auth_verify returns nonzero"; crypto_hash_sha256(h2,h,hlen); for (j = 0;j < mlen;++j) m[j] ^= h2[j % 32]; m[mlen] = h2[0]; } if (crypto_auth(h,m,CHECKSUM_BYTES,k) != 0) return "crypto_auth returns nonzero"; if (crypto_auth_verify(h,m,CHECKSUM_BYTES,k) != 0) return "crypto_auth_verify returns nonzero"; for (i = 0;i < crypto_auth_BYTES;++i) { checksum[2 * i] = "0123456789abcdef"[15 & (h[i] >> 4)]; checksum[2 * i + 1] = "0123456789abcdef"[15 & h[i]]; } checksum[2 * i] = 0; return 0; } curvedns-curvedns-0.87/nacl/crypto_auth/wrapper-auth.cpp000066400000000000000000000005611150631715100235330ustar00rootroot00000000000000#include using std::string; #include "crypto_auth.h" string crypto_auth(const string &m,const string &k) { if (k.size() != crypto_auth_KEYBYTES) throw "incorrect key length"; unsigned char a[crypto_auth_BYTES]; crypto_auth(a,(const unsigned char *) m.c_str(),m.size(),(const unsigned char *) k.c_str()); return string((char *) a,crypto_auth_BYTES); } curvedns-curvedns-0.87/nacl/crypto_auth/wrapper-verify.cpp000066400000000000000000000007571150631715100241050ustar00rootroot00000000000000#include using std::string; #include "crypto_auth.h" void crypto_auth_verify(const string &a,const string &m,const string &k) { if (k.size() != crypto_auth_KEYBYTES) throw "incorrect key length"; if (a.size() != crypto_auth_BYTES) throw "incorrect authenticator length"; if (crypto_auth_verify( (const unsigned char *) a.c_str(), (const unsigned char *) m.c_str(),m.size(), (const unsigned char *) k.c_str()) == 0) return; throw "invalid authenticator"; } curvedns-curvedns-0.87/nacl/crypto_box/000077500000000000000000000000001150631715100202355ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_box/curve25519xsalsa20poly1305/000077500000000000000000000000001150631715100245625ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_box/curve25519xsalsa20poly1305/checksum000066400000000000000000000000611150631715100263040ustar00rootroot000000000000005fac7400caabc14a99c5c0bc13fb1df5e468e870382a3a1c curvedns-curvedns-0.87/nacl/crypto_box/curve25519xsalsa20poly1305/ref/000077500000000000000000000000001150631715100253365ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_box/curve25519xsalsa20poly1305/ref/after.c000066400000000000000000000007651150631715100266130ustar00rootroot00000000000000#include "crypto_secretbox_xsalsa20poly1305.h" #include "crypto_box.h" int crypto_box_afternm( unsigned char *c, const unsigned char *m,unsigned long long mlen, const unsigned char *n, const unsigned char *k ) { return crypto_secretbox_xsalsa20poly1305(c,m,mlen,n,k); } int crypto_box_open_afternm( unsigned char *m, const unsigned char *c,unsigned long long clen, const unsigned char *n, const unsigned char *k ) { return 
crypto_secretbox_xsalsa20poly1305_open(m,c,clen,n,k); } curvedns-curvedns-0.87/nacl/crypto_box/curve25519xsalsa20poly1305/ref/api.h000066400000000000000000000002721150631715100262610ustar00rootroot00000000000000#define CRYPTO_PUBLICKEYBYTES 32 #define CRYPTO_SECRETKEYBYTES 32 #define CRYPTO_BEFORENMBYTES 32 #define CRYPTO_NONCEBYTES 24 #define CRYPTO_ZEROBYTES 32 #define CRYPTO_BOXZEROBYTES 16 curvedns-curvedns-0.87/nacl/crypto_box/curve25519xsalsa20poly1305/ref/before.c000066400000000000000000000006351150631715100267500ustar00rootroot00000000000000#include "crypto_core_hsalsa20.h" #include "crypto_scalarmult_curve25519.h" #include "crypto_box.h" static const unsigned char sigma[16] = "expand 32-byte k"; static const unsigned char n[16] = {0}; int crypto_box_beforenm( unsigned char *k, const unsigned char *pk, const unsigned char *sk ) { unsigned char s[32]; crypto_scalarmult_curve25519(s,sk,pk); return crypto_core_hsalsa20(k,n,s,sigma); } curvedns-curvedns-0.87/nacl/crypto_box/curve25519xsalsa20poly1305/ref/box.c000066400000000000000000000011521150631715100262710ustar00rootroot00000000000000#include "crypto_box.h" int crypto_box( unsigned char *c, const unsigned char *m,unsigned long long mlen, const unsigned char *n, const unsigned char *pk, const unsigned char *sk ) { unsigned char k[crypto_box_BEFORENMBYTES]; crypto_box_beforenm(k,pk,sk); return crypto_box_afternm(c,m,mlen,n,k); } int crypto_box_open( unsigned char *m, const unsigned char *c,unsigned long long clen, const unsigned char *n, const unsigned char *pk, const unsigned char *sk ) { unsigned char k[crypto_box_BEFORENMBYTES]; crypto_box_beforenm(k,pk,sk); return crypto_box_open_afternm(m,c,clen,n,k); } curvedns-curvedns-0.87/nacl/crypto_box/curve25519xsalsa20poly1305/ref/keypair.c000066400000000000000000000003541150631715100271500ustar00rootroot00000000000000#include "crypto_scalarmult_curve25519.h" #include "crypto_box.h" #include "randombytes.h" int crypto_box_keypair( unsigned char *pk, unsigned char *sk ) { randombytes(sk,32); return crypto_scalarmult_curve25519_base(pk,sk); } curvedns-curvedns-0.87/nacl/crypto_box/curve25519xsalsa20poly1305/selected000066400000000000000000000000001150631715100262630ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_box/curve25519xsalsa20poly1305/used000066400000000000000000000000001150631715100254330ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_box/measure.c000066400000000000000000000110251150631715100220410ustar00rootroot00000000000000#include #include "randombytes.h" #include "cpucycles.h" #include "crypto_box.h" extern void printentry(long long,const char *,long long *,long long); extern unsigned char *alignedcalloc(unsigned long long); extern const char *primitiveimplementation; extern const char *implementationversion; extern const char *sizenames[]; extern const long long sizes[]; extern void allocate(void); extern void measure(void); const char *primitiveimplementation = crypto_box_IMPLEMENTATION; const char *implementationversion = crypto_box_VERSION; const char *sizenames[] = { "publickeybytes", "secretkeybytes", "beforenmbytes", "noncebytes", "zerobytes", "boxzerobytes", 0 }; const long long sizes[] = { crypto_box_PUBLICKEYBYTES, crypto_box_SECRETKEYBYTES, crypto_box_BEFORENMBYTES, crypto_box_NONCEBYTES, crypto_box_ZEROBYTES, crypto_box_BOXZEROBYTES }; #define MAXTEST_BYTES 4096 static unsigned char *ska; static unsigned char *pka; static unsigned char *skb; static unsigned char *pkb; static unsigned char *n; static unsigned char *m; static unsigned 
char *c; static unsigned char *sa; static unsigned char *sb; void preallocate(void) { } void allocate(void) { ska = alignedcalloc(crypto_box_SECRETKEYBYTES); pka = alignedcalloc(crypto_box_PUBLICKEYBYTES); skb = alignedcalloc(crypto_box_SECRETKEYBYTES); pkb = alignedcalloc(crypto_box_PUBLICKEYBYTES); n = alignedcalloc(crypto_box_NONCEBYTES); m = alignedcalloc(MAXTEST_BYTES + crypto_box_ZEROBYTES); c = alignedcalloc(MAXTEST_BYTES + crypto_box_ZEROBYTES); sa = alignedcalloc(crypto_box_BEFORENMBYTES); sb = alignedcalloc(crypto_box_BEFORENMBYTES); } #define TIMINGS 15 static long long cycles[TIMINGS + 1]; void measure(void) { int i; int loop; int mlen; for (loop = 0;loop < LOOPS;++loop) { for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_box_keypair(pka,ska); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(-1,"keypair_cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_box_keypair(pkb,skb); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(-1,"keypair_cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_box_beforenm(sa,pkb,ska); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(-1,"beforenm_cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_box_beforenm(sb,pka,skb); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(-1,"beforenm_cycles",cycles,TIMINGS); for (mlen = 0;mlen <= MAXTEST_BYTES;mlen += 1 + mlen / 8) { randombytes(n,crypto_box_NONCEBYTES); randombytes(m + crypto_box_ZEROBYTES,mlen); randombytes(c,mlen + crypto_box_ZEROBYTES); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_box(c,m,mlen + crypto_box_ZEROBYTES,n,pka,skb); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_box_open(m,c,mlen + crypto_box_ZEROBYTES,n,pkb,ska); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"open_cycles",cycles,TIMINGS); ++c[crypto_box_ZEROBYTES]; for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_box_open(m,c,mlen + crypto_box_ZEROBYTES,n,pkb,ska); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"forgery_open_cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_box_afternm(c,m,mlen + crypto_box_ZEROBYTES,n,sb); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"afternm_cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_box_open_afternm(m,c,mlen + crypto_box_ZEROBYTES,n,sa); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"open_afternm_cycles",cycles,TIMINGS); ++c[crypto_box_ZEROBYTES]; for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_box_open_afternm(m,c,mlen + crypto_box_ZEROBYTES,n,sa); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"forgery_open_afternm_cycles",cycles,TIMINGS); } } } curvedns-curvedns-0.87/nacl/crypto_box/try.c000066400000000000000000000206121150631715100212200ustar00rootroot00000000000000/* * crypto_box/try.c version 20090118 * D. J. Bernstein * Public domain. 
*/ #include "crypto_box.h" extern unsigned char *alignedcalloc(unsigned long long); const char *primitiveimplementation = crypto_box_IMPLEMENTATION; #define MAXTEST_BYTES 10000 #define CHECKSUM_BYTES 4096 #define TUNE_BYTES 1536 static unsigned char *ska; static unsigned char *pka; static unsigned char *skb; static unsigned char *pkb; static unsigned char *s; static unsigned char *n; static unsigned char *m; static unsigned char *c; static unsigned char *t; static unsigned char *ska2; static unsigned char *pka2; static unsigned char *skb2; static unsigned char *pkb2; static unsigned char *s2; static unsigned char *n2; static unsigned char *m2; static unsigned char *c2; static unsigned char *t2; #define sklen crypto_box_SECRETKEYBYTES #define pklen crypto_box_PUBLICKEYBYTES #define nlen crypto_box_NONCEBYTES #define slen crypto_box_BEFORENMBYTES void preallocate(void) { } void allocate(void) { ska = alignedcalloc(sklen); pka = alignedcalloc(pklen); skb = alignedcalloc(sklen); pkb = alignedcalloc(pklen); n = alignedcalloc(nlen); m = alignedcalloc(MAXTEST_BYTES + crypto_box_ZEROBYTES); c = alignedcalloc(MAXTEST_BYTES + crypto_box_ZEROBYTES); t = alignedcalloc(MAXTEST_BYTES + crypto_box_ZEROBYTES); s = alignedcalloc(slen); ska2 = alignedcalloc(sklen); pka2 = alignedcalloc(pklen); skb2 = alignedcalloc(sklen); pkb2 = alignedcalloc(pklen); n2 = alignedcalloc(nlen); m2 = alignedcalloc(MAXTEST_BYTES + crypto_box_ZEROBYTES); c2 = alignedcalloc(MAXTEST_BYTES + crypto_box_ZEROBYTES); t2 = alignedcalloc(MAXTEST_BYTES + crypto_box_ZEROBYTES); s2 = alignedcalloc(slen); } void predoit(void) { } void doit(void) { crypto_box(c,m,TUNE_BYTES + crypto_box_ZEROBYTES,n,pka,skb); crypto_box_open(t,c,TUNE_BYTES + crypto_box_ZEROBYTES,n,pkb,ska); } char checksum[nlen * 2 + 1]; const char *checksum_compute(void) { long long i; long long j; if (crypto_box_keypair(pka,ska) != 0) return "crypto_box_keypair returns nonzero"; if (crypto_box_keypair(pkb,skb) != 0) return "crypto_box_keypair returns nonzero"; for (j = 0;j < crypto_box_ZEROBYTES;++j) m[j] = 0; for (i = 0;i < CHECKSUM_BYTES;++i) { long long mlen = i + crypto_box_ZEROBYTES; long long tlen = i + crypto_box_ZEROBYTES; long long clen = i + crypto_box_ZEROBYTES; for (j = -16;j < 0;++j) ska[j] = random(); for (j = -16;j < 0;++j) skb[j] = random(); for (j = -16;j < 0;++j) pka[j] = random(); for (j = -16;j < 0;++j) pkb[j] = random(); for (j = -16;j < 0;++j) m[j] = random(); for (j = -16;j < 0;++j) n[j] = random(); for (j = sklen;j < sklen + 16;++j) ska[j] = random(); for (j = sklen;j < sklen + 16;++j) skb[j] = random(); for (j = pklen;j < pklen + 16;++j) pka[j] = random(); for (j = pklen;j < pklen + 16;++j) pkb[j] = random(); for (j = mlen;j < mlen + 16;++j) m[j] = random(); for (j = nlen;j < nlen + 16;++j) n[j] = random(); for (j = -16;j < sklen + 16;++j) ska2[j] = ska[j]; for (j = -16;j < sklen + 16;++j) skb2[j] = skb[j]; for (j = -16;j < pklen + 16;++j) pka2[j] = pka[j]; for (j = -16;j < pklen + 16;++j) pkb2[j] = pkb[j]; for (j = -16;j < mlen + 16;++j) m2[j] = m[j]; for (j = -16;j < nlen + 16;++j) n2[j] = n[j]; for (j = -16;j < clen + 16;++j) c2[j] = c[j] = random(); if (crypto_box(c,m,mlen,n,pkb,ska) != 0) return "crypto_box returns nonzero"; for (j = -16;j < mlen + 16;++j) if (m2[j] != m[j]) return "crypto_box overwrites m"; for (j = -16;j < nlen + 16;++j) if (n2[j] != n[j]) return "crypto_box overwrites n"; for (j = -16;j < 0;++j) if (c2[j] != c[j]) return "crypto_box writes before output"; for (j = clen;j < clen + 16;++j) if (c2[j] != c[j]) return "crypto_box 
writes after output"; for (j = 0;j < crypto_box_BOXZEROBYTES;++j) if (c[j] != 0) return "crypto_box does not clear extra bytes"; for (j = -16;j < sklen + 16;++j) if (ska2[j] != ska[j]) return "crypto_box overwrites ska"; for (j = -16;j < sklen + 16;++j) if (skb2[j] != skb[j]) return "crypto_box overwrites skb"; for (j = -16;j < pklen + 16;++j) if (pka2[j] != pka[j]) return "crypto_box overwrites pka"; for (j = -16;j < pklen + 16;++j) if (pkb2[j] != pkb[j]) return "crypto_box overwrites pkb"; for (j = -16;j < 0;++j) c[j] = random(); for (j = clen;j < clen + 16;++j) c[j] = random(); for (j = -16;j < clen + 16;++j) c2[j] = c[j]; for (j = -16;j < tlen + 16;++j) t2[j] = t[j] = random(); if (crypto_box_open(t,c,clen,n,pka,skb) != 0) return "crypto_box_open returns nonzero"; for (j = -16;j < clen + 16;++j) if (c2[j] != c[j]) return "crypto_box_open overwrites c"; for (j = -16;j < nlen + 16;++j) if (n2[j] != n[j]) return "crypto_box_open overwrites n"; for (j = -16;j < 0;++j) if (t2[j] != t[j]) return "crypto_box_open writes before output"; for (j = tlen;j < tlen + 16;++j) if (t2[j] != t[j]) return "crypto_box_open writes after output"; for (j = 0;j < crypto_box_ZEROBYTES;++j) if (t[j] != 0) return "crypto_box_open does not clear extra bytes"; for (j = -16;j < sklen + 16;++j) if (ska2[j] != ska[j]) return "crypto_box_open overwrites ska"; for (j = -16;j < sklen + 16;++j) if (skb2[j] != skb[j]) return "crypto_box_open overwrites skb"; for (j = -16;j < pklen + 16;++j) if (pka2[j] != pka[j]) return "crypto_box_open overwrites pka"; for (j = -16;j < pklen + 16;++j) if (pkb2[j] != pkb[j]) return "crypto_box_open overwrites pkb"; for (j = 0;j < mlen;++j) if (t[j] != m[j]) return "plaintext does not match"; for (j = -16;j < slen + 16;++j) s2[j] = s[j] = random(); if (crypto_box_beforenm(s,pkb,ska) != 0) return "crypto_box_beforenm returns nonzero"; for (j = -16;j < pklen + 16;++j) if (pka2[j] != pka[j]) return "crypto_box_open overwrites pk"; for (j = -16;j < sklen + 16;++j) if (skb2[j] != skb[j]) return "crypto_box_open overwrites sk"; for (j = -16;j < 0;++j) if (s2[j] != s[j]) return "crypto_box_beforenm writes before output"; for (j = slen;j < slen + 16;++j) if (s2[j] != s[j]) return "crypto_box_beforenm writes after output"; for (j = -16;j < slen + 16;++j) s2[j] = s[j]; for (j = -16;j < tlen + 16;++j) t2[j] = t[j] = random(); if (crypto_box_afternm(t,m,mlen,n,s) != 0) return "crypto_box_afternm returns nonzero"; for (j = -16;j < slen + 16;++j) if (s2[j] != s[j]) return "crypto_box_afternm overwrites s"; for (j = -16;j < mlen + 16;++j) if (m2[j] != m[j]) return "crypto_box_afternm overwrites m"; for (j = -16;j < nlen + 16;++j) if (n2[j] != n[j]) return "crypto_box_afternm overwrites n"; for (j = -16;j < 0;++j) if (t2[j] != t[j]) return "crypto_box_afternm writes before output"; for (j = tlen;j < tlen + 16;++j) if (t2[j] != t[j]) return "crypto_box_afternm writes after output"; for (j = 0;j < crypto_box_BOXZEROBYTES;++j) if (t[j] != 0) return "crypto_box_afternm does not clear extra bytes"; for (j = 0;j < mlen;++j) if (t[j] != c[j]) return "crypto_box_afternm does not match crypto_box"; if (crypto_box_beforenm(s,pka,skb) != 0) return "crypto_box_beforenm returns nonzero"; for (j = -16;j < tlen + 16;++j) t2[j] = t[j] = random(); if (crypto_box_open_afternm(t,c,clen,n,s) != 0) return "crypto_box_open_afternm returns nonzero"; for (j = -16;j < slen + 16;++j) if (s2[j] != s[j]) return "crypto_box_open_afternm overwrites s"; for (j = -16;j < mlen + 16;++j) if (m2[j] != m[j]) return "crypto_box_open_afternm 
overwrites m"; for (j = -16;j < nlen + 16;++j) if (n2[j] != n[j]) return "crypto_box_open_afternm overwrites n"; for (j = -16;j < 0;++j) if (t2[j] != t[j]) return "crypto_box_open_afternm writes before output"; for (j = tlen;j < tlen + 16;++j) if (t2[j] != t[j]) return "crypto_box_open_afternm writes after output"; for (j = 0;j < crypto_box_ZEROBYTES;++j) if (t[j] != 0) return "crypto_box_open_afternm does not clear extra bytes"; for (j = 0;j < mlen;++j) if (t[j] != m[j]) return "crypto_box_open_afternm does not match crypto_box_open"; for (j = 0;j < i;++j) n[j % nlen] ^= c[j + crypto_box_BOXZEROBYTES]; if (i == 0) m[crypto_box_ZEROBYTES] = 0; m[i + crypto_box_ZEROBYTES] = m[crypto_box_ZEROBYTES]; for (j = 0;j < i;++j) m[j + crypto_box_ZEROBYTES] ^= c[j + crypto_box_BOXZEROBYTES]; } for (i = 0;i < nlen;++i) { checksum[2 * i] = "0123456789abcdef"[15 & (n[i] >> 4)]; checksum[2 * i + 1] = "0123456789abcdef"[15 & n[i]]; } checksum[2 * i] = 0; return 0; } curvedns-curvedns-0.87/nacl/crypto_box/wrapper-box.cpp000066400000000000000000000016041150631715100232100ustar00rootroot00000000000000#include using std::string; #include "crypto_box.h" string crypto_box(const string &m,const string &n,const string &pk,const string &sk) { if (pk.size() != crypto_box_PUBLICKEYBYTES) throw "incorrect public-key length"; if (sk.size() != crypto_box_SECRETKEYBYTES) throw "incorrect secret-key length"; if (n.size() != crypto_box_NONCEBYTES) throw "incorrect nonce length"; size_t mlen = m.size() + crypto_box_ZEROBYTES; unsigned char mpad[mlen]; for (int i = 0;i < crypto_box_ZEROBYTES;++i) mpad[i] = 0; for (int i = crypto_box_ZEROBYTES;i < mlen;++i) mpad[i] = m[i - crypto_box_ZEROBYTES]; unsigned char cpad[mlen]; crypto_box(cpad,mpad,mlen, (const unsigned char *) n.c_str(), (const unsigned char *) pk.c_str(), (const unsigned char *) sk.c_str() ); return string( (char *) cpad + crypto_box_BOXZEROBYTES, mlen - crypto_box_BOXZEROBYTES ); } curvedns-curvedns-0.87/nacl/crypto_box/wrapper-keypair.cpp000066400000000000000000000005001150631715100240560ustar00rootroot00000000000000#include using std::string; #include "crypto_box.h" string crypto_box_keypair(string *sk_string) { unsigned char pk[crypto_box_PUBLICKEYBYTES]; unsigned char sk[crypto_box_SECRETKEYBYTES]; crypto_box_keypair(pk,sk); *sk_string = string((char *) sk,sizeof sk); return string((char *) pk,sizeof pk); } curvedns-curvedns-0.87/nacl/crypto_box/wrapper-open.cpp000066400000000000000000000021731150631715100233630ustar00rootroot00000000000000#include using std::string; #include "crypto_box.h" string crypto_box_open(const string &c,const string &n,const string &pk,const string &sk) { if (pk.size() != crypto_box_PUBLICKEYBYTES) throw "incorrect public-key length"; if (sk.size() != crypto_box_SECRETKEYBYTES) throw "incorrect secret-key length"; if (n.size() != crypto_box_NONCEBYTES) throw "incorrect nonce length"; size_t clen = c.size() + crypto_box_BOXZEROBYTES; unsigned char cpad[clen]; for (int i = 0;i < crypto_box_BOXZEROBYTES;++i) cpad[i] = 0; for (int i = crypto_box_BOXZEROBYTES;i < clen;++i) cpad[i] = c[i - crypto_box_BOXZEROBYTES]; unsigned char mpad[clen]; if (crypto_box_open(mpad,cpad,clen, (const unsigned char *) n.c_str(), (const unsigned char *) pk.c_str(), (const unsigned char *) sk.c_str() ) != 0) throw "ciphertext fails verification"; if (clen < crypto_box_ZEROBYTES) throw "ciphertext too short"; // should have been caught by _open return string( (char *) mpad + crypto_box_ZEROBYTES, clen - crypto_box_ZEROBYTES ); } 
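The crypto_box files above expose both the one-shot calls (box.c) and the split beforenm/afternm calls (before.c, after.c). The split form lets a caller pay for the Curve25519 scalar multiplication once per peer instead of once per packet. A minimal sketch in C of that pattern, assuming the generated crypto_box.h and randombytes.h headers; the fixed 64-byte payload and the name seal_packets are illustrative only and not part of this tree:

#include "crypto_box.h"
#include "randombytes.h"

#define PAYLOAD 64   /* illustrative fixed plaintext size */

/* Derive the shared key once, then seal nmsgs packets with it.
 * Each msgs[i] must start with crypto_box_ZEROBYTES zero bytes,
 * exactly as crypto_box itself requires. */
int seal_packets(unsigned char out[][PAYLOAD + crypto_box_ZEROBYTES],
                 unsigned char nonces[][crypto_box_NONCEBYTES],
                 const unsigned char msgs[][PAYLOAD + crypto_box_ZEROBYTES],
                 int nmsgs,
                 const unsigned char *their_pk,
                 const unsigned char *my_sk)
{
  unsigned char k[crypto_box_BEFORENMBYTES];
  int i;

  crypto_box_beforenm(k, their_pk, my_sk);           /* one scalarmult + HSalsa20 */
  for (i = 0; i < nmsgs; ++i) {
    randombytes(nonces[i], crypto_box_NONCEBYTES);   /* fresh nonce per packet */
    if (crypto_box_afternm(out[i], msgs[i],
                           PAYLOAD + crypto_box_ZEROBYTES, nonces[i], k) != 0)
      return -1;
  }
  return 0;   /* each nonces[i] is transmitted alongside out[i] */
}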
curvedns-curvedns-0.87/nacl/crypto_core/000077500000000000000000000000001150631715100203755ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/hsalsa20/000077500000000000000000000000001150631715100220125ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/hsalsa20/checksum000066400000000000000000000001011150631715100235270ustar00rootroot0000000000000028ebe700b5878570702a68740aa131e6fa907e58a3f6915cd183c6db3f7afd7a curvedns-curvedns-0.87/nacl/crypto_core/hsalsa20/ref/000077500000000000000000000000001150631715100225665ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/hsalsa20/ref/api.h000066400000000000000000000001631150631715100235100ustar00rootroot00000000000000#define CRYPTO_OUTPUTBYTES 32 #define CRYPTO_INPUTBYTES 16 #define CRYPTO_KEYBYTES 32 #define CRYPTO_CONSTBYTES 16 curvedns-curvedns-0.87/nacl/crypto_core/hsalsa20/ref/core.c000066400000000000000000000063141150631715100236660ustar00rootroot00000000000000/* version 20080912 D. J. Bernstein Public domain. */ #include "crypto_core.h" #define ROUNDS 20 typedef unsigned int uint32; static uint32 rotate(uint32 u,int c) { return (u << c) | (u >> (32 - c)); } static uint32 load_littleendian(const unsigned char *x) { return (uint32) (x[0]) \ | (((uint32) (x[1])) << 8) \ | (((uint32) (x[2])) << 16) \ | (((uint32) (x[3])) << 24) ; } static void store_littleendian(unsigned char *x,uint32 u) { x[0] = u; u >>= 8; x[1] = u; u >>= 8; x[2] = u; u >>= 8; x[3] = u; } int crypto_core( unsigned char *out, const unsigned char *in, const unsigned char *k, const unsigned char *c ) { uint32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15; uint32 j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15; int i; j0 = x0 = load_littleendian(c + 0); j1 = x1 = load_littleendian(k + 0); j2 = x2 = load_littleendian(k + 4); j3 = x3 = load_littleendian(k + 8); j4 = x4 = load_littleendian(k + 12); j5 = x5 = load_littleendian(c + 4); j6 = x6 = load_littleendian(in + 0); j7 = x7 = load_littleendian(in + 4); j8 = x8 = load_littleendian(in + 8); j9 = x9 = load_littleendian(in + 12); j10 = x10 = load_littleendian(c + 8); j11 = x11 = load_littleendian(k + 16); j12 = x12 = load_littleendian(k + 20); j13 = x13 = load_littleendian(k + 24); j14 = x14 = load_littleendian(k + 28); j15 = x15 = load_littleendian(c + 12); for (i = ROUNDS;i > 0;i -= 2) { x4 ^= rotate( x0+x12, 7); x8 ^= rotate( x4+ x0, 9); x12 ^= rotate( x8+ x4,13); x0 ^= rotate(x12+ x8,18); x9 ^= rotate( x5+ x1, 7); x13 ^= rotate( x9+ x5, 9); x1 ^= rotate(x13+ x9,13); x5 ^= rotate( x1+x13,18); x14 ^= rotate(x10+ x6, 7); x2 ^= rotate(x14+x10, 9); x6 ^= rotate( x2+x14,13); x10 ^= rotate( x6+ x2,18); x3 ^= rotate(x15+x11, 7); x7 ^= rotate( x3+x15, 9); x11 ^= rotate( x7+ x3,13); x15 ^= rotate(x11+ x7,18); x1 ^= rotate( x0+ x3, 7); x2 ^= rotate( x1+ x0, 9); x3 ^= rotate( x2+ x1,13); x0 ^= rotate( x3+ x2,18); x6 ^= rotate( x5+ x4, 7); x7 ^= rotate( x6+ x5, 9); x4 ^= rotate( x7+ x6,13); x5 ^= rotate( x4+ x7,18); x11 ^= rotate(x10+ x9, 7); x8 ^= rotate(x11+x10, 9); x9 ^= rotate( x8+x11,13); x10 ^= rotate( x9+ x8,18); x12 ^= rotate(x15+x14, 7); x13 ^= rotate(x12+x15, 9); x14 ^= rotate(x13+x12,13); x15 ^= rotate(x14+x13,18); } x0 += j0; x1 += j1; x2 += j2; x3 += j3; x4 += j4; x5 += j5; x6 += j6; x7 += j7; x8 += j8; x9 += j9; x10 += j10; x11 += j11; x12 += j12; x13 += j13; x14 += j14; x15 += j15; x0 -= load_littleendian(c + 0); x5 -= load_littleendian(c + 4); x10 -= load_littleendian(c + 8); x15 -= load_littleendian(c + 12); x6 -= 
load_littleendian(in + 0); x7 -= load_littleendian(in + 4); x8 -= load_littleendian(in + 8); x9 -= load_littleendian(in + 12); store_littleendian(out + 0,x0); store_littleendian(out + 4,x5); store_littleendian(out + 8,x10); store_littleendian(out + 12,x15); store_littleendian(out + 16,x6); store_littleendian(out + 20,x7); store_littleendian(out + 24,x8); store_littleendian(out + 28,x9); return 0; } curvedns-curvedns-0.87/nacl/crypto_core/hsalsa20/ref2/000077500000000000000000000000001150631715100226505ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/hsalsa20/ref2/api.h000066400000000000000000000001631150631715100235720ustar00rootroot00000000000000#define CRYPTO_OUTPUTBYTES 32 #define CRYPTO_INPUTBYTES 16 #define CRYPTO_KEYBYTES 32 #define CRYPTO_CONSTBYTES 16 curvedns-curvedns-0.87/nacl/crypto_core/hsalsa20/ref2/core.c000066400000000000000000000051011150631715100237410ustar00rootroot00000000000000/* version 20080912 D. J. Bernstein Public domain. */ #include "crypto_core.h" #define ROUNDS 20 typedef unsigned int uint32; static uint32 rotate(uint32 u,int c) { return (u << c) | (u >> (32 - c)); } static uint32 load_littleendian(const unsigned char *x) { return (uint32) (x[0]) \ | (((uint32) (x[1])) << 8) \ | (((uint32) (x[2])) << 16) \ | (((uint32) (x[3])) << 24) ; } static void store_littleendian(unsigned char *x,uint32 u) { x[0] = u; u >>= 8; x[1] = u; u >>= 8; x[2] = u; u >>= 8; x[3] = u; } int crypto_core( unsigned char *out, const unsigned char *in, const unsigned char *k, const unsigned char *c ) { uint32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15; int i; x0 = load_littleendian(c + 0); x1 = load_littleendian(k + 0); x2 = load_littleendian(k + 4); x3 = load_littleendian(k + 8); x4 = load_littleendian(k + 12); x5 = load_littleendian(c + 4); x6 = load_littleendian(in + 0); x7 = load_littleendian(in + 4); x8 = load_littleendian(in + 8); x9 = load_littleendian(in + 12); x10 = load_littleendian(c + 8); x11 = load_littleendian(k + 16); x12 = load_littleendian(k + 20); x13 = load_littleendian(k + 24); x14 = load_littleendian(k + 28); x15 = load_littleendian(c + 12); for (i = ROUNDS;i > 0;i -= 2) { x4 ^= rotate( x0+x12, 7); x8 ^= rotate( x4+ x0, 9); x12 ^= rotate( x8+ x4,13); x0 ^= rotate(x12+ x8,18); x9 ^= rotate( x5+ x1, 7); x13 ^= rotate( x9+ x5, 9); x1 ^= rotate(x13+ x9,13); x5 ^= rotate( x1+x13,18); x14 ^= rotate(x10+ x6, 7); x2 ^= rotate(x14+x10, 9); x6 ^= rotate( x2+x14,13); x10 ^= rotate( x6+ x2,18); x3 ^= rotate(x15+x11, 7); x7 ^= rotate( x3+x15, 9); x11 ^= rotate( x7+ x3,13); x15 ^= rotate(x11+ x7,18); x1 ^= rotate( x0+ x3, 7); x2 ^= rotate( x1+ x0, 9); x3 ^= rotate( x2+ x1,13); x0 ^= rotate( x3+ x2,18); x6 ^= rotate( x5+ x4, 7); x7 ^= rotate( x6+ x5, 9); x4 ^= rotate( x7+ x6,13); x5 ^= rotate( x4+ x7,18); x11 ^= rotate(x10+ x9, 7); x8 ^= rotate(x11+x10, 9); x9 ^= rotate( x8+x11,13); x10 ^= rotate( x9+ x8,18); x12 ^= rotate(x15+x14, 7); x13 ^= rotate(x12+x15, 9); x14 ^= rotate(x13+x12,13); x15 ^= rotate(x14+x13,18); } store_littleendian(out + 0,x0); store_littleendian(out + 4,x5); store_littleendian(out + 8,x10); store_littleendian(out + 12,x15); store_littleendian(out + 16,x6); store_littleendian(out + 20,x7); store_littleendian(out + 24,x8); store_littleendian(out + 28,x9); return 0; } 
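Both HSalsa20 cores above compute the same 32-byte output: ref runs the full Salsa20 feed-forward and then subtracts the constant and input words again, while ref2 skips the feed-forward and stores the relevant state words directly. The purpose of this core is nonce extension: it maps a long-term key plus the first 16 bytes of a 24-byte nonce to a one-off subkey, as crypto_box_beforenm (before.c above) and the XSalsa20 stream construction do. A sketch of that construction, assuming the generated crypto_core_hsalsa20.h and crypto_stream_salsa20.h headers; xsalsa20_sketch is an illustrative name, not a function in the tree:

#include "crypto_core_hsalsa20.h"
#include "crypto_stream_salsa20.h"

static const unsigned char sigma[16] = "expand 32-byte k";

/* XSalsa20 in two steps: HSalsa20 maps (k, n[0..15]) to a subkey, and
 * plain Salsa20 then runs under that subkey with the remaining 8 nonce
 * bytes.  This is how a 24-byte nonce is accommodated on top of a core
 * that only takes 16 bytes of input. */
int xsalsa20_sketch(unsigned char *out, unsigned long long outlen,
                    const unsigned char n[24], const unsigned char k[32])
{
  unsigned char subkey[32];
  crypto_core_hsalsa20(subkey, n, k, sigma);            /* consumes n[0..15] */
  return crypto_stream_salsa20(out, outlen, n + 16, subkey);
}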
curvedns-curvedns-0.87/nacl/crypto_core/hsalsa20/used000066400000000000000000000000001150631715100226630ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/measure.c000066400000000000000000000006671150631715100222130ustar00rootroot00000000000000#include "crypto_core.h" const char *primitiveimplementation = crypto_core_IMPLEMENTATION; const char *implementationversion = crypto_core_VERSION; const char *sizenames[] = { "outputbytes", "inputbytes", "keybytes", "constbytes", 0 }; const long long sizes[] = { crypto_core_OUTPUTBYTES, crypto_core_INPUTBYTES, crypto_core_KEYBYTES, crypto_core_CONSTBYTES }; void preallocate(void) { } void allocate(void) { } void measure(void) { } curvedns-curvedns-0.87/nacl/crypto_core/salsa20/000077500000000000000000000000001150631715100216425ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/salsa20/checksum000066400000000000000000000002011150631715100233600ustar00rootroot000000000000009d1ee8d84b974e648507ffd93829376c5b4420751710e44f6593abd8769378011d85ecda51ceb8f43661d3c65ef5b57c4f5bf8df76c8202784c8df8def61e6a6 curvedns-curvedns-0.87/nacl/crypto_core/salsa20/ref/000077500000000000000000000000001150631715100224165ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/salsa20/ref/api.h000066400000000000000000000001631150631715100233400ustar00rootroot00000000000000#define CRYPTO_OUTPUTBYTES 64 #define CRYPTO_INPUTBYTES 16 #define CRYPTO_KEYBYTES 32 #define CRYPTO_CONSTBYTES 16 curvedns-curvedns-0.87/nacl/crypto_core/salsa20/ref/core.c000066400000000000000000000063171150631715100235210ustar00rootroot00000000000000/* version 20080912 D. J. Bernstein Public domain. */ #include "crypto_core.h" #define ROUNDS 20 typedef unsigned int uint32; static uint32 rotate(uint32 u,int c) { return (u << c) | (u >> (32 - c)); } static uint32 load_littleendian(const unsigned char *x) { return (uint32) (x[0]) \ | (((uint32) (x[1])) << 8) \ | (((uint32) (x[2])) << 16) \ | (((uint32) (x[3])) << 24) ; } static void store_littleendian(unsigned char *x,uint32 u) { x[0] = u; u >>= 8; x[1] = u; u >>= 8; x[2] = u; u >>= 8; x[3] = u; } int crypto_core( unsigned char *out, const unsigned char *in, const unsigned char *k, const unsigned char *c ) { uint32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15; uint32 j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15; int i; j0 = x0 = load_littleendian(c + 0); j1 = x1 = load_littleendian(k + 0); j2 = x2 = load_littleendian(k + 4); j3 = x3 = load_littleendian(k + 8); j4 = x4 = load_littleendian(k + 12); j5 = x5 = load_littleendian(c + 4); j6 = x6 = load_littleendian(in + 0); j7 = x7 = load_littleendian(in + 4); j8 = x8 = load_littleendian(in + 8); j9 = x9 = load_littleendian(in + 12); j10 = x10 = load_littleendian(c + 8); j11 = x11 = load_littleendian(k + 16); j12 = x12 = load_littleendian(k + 20); j13 = x13 = load_littleendian(k + 24); j14 = x14 = load_littleendian(k + 28); j15 = x15 = load_littleendian(c + 12); for (i = ROUNDS;i > 0;i -= 2) { x4 ^= rotate( x0+x12, 7); x8 ^= rotate( x4+ x0, 9); x12 ^= rotate( x8+ x4,13); x0 ^= rotate(x12+ x8,18); x9 ^= rotate( x5+ x1, 7); x13 ^= rotate( x9+ x5, 9); x1 ^= rotate(x13+ x9,13); x5 ^= rotate( x1+x13,18); x14 ^= rotate(x10+ x6, 7); x2 ^= rotate(x14+x10, 9); x6 ^= rotate( x2+x14,13); x10 ^= rotate( x6+ x2,18); x3 ^= rotate(x15+x11, 7); x7 ^= rotate( x3+x15, 9); x11 ^= rotate( x7+ x3,13); x15 ^= rotate(x11+ x7,18); x1 ^= rotate( x0+ x3, 7); x2 ^= rotate( x1+ x0, 9); x3 ^= rotate( x2+ x1,13); x0 ^= rotate( x3+ x2,18); x6 ^= 
rotate( x5+ x4, 7); x7 ^= rotate( x6+ x5, 9); x4 ^= rotate( x7+ x6,13); x5 ^= rotate( x4+ x7,18); x11 ^= rotate(x10+ x9, 7); x8 ^= rotate(x11+x10, 9); x9 ^= rotate( x8+x11,13); x10 ^= rotate( x9+ x8,18); x12 ^= rotate(x15+x14, 7); x13 ^= rotate(x12+x15, 9); x14 ^= rotate(x13+x12,13); x15 ^= rotate(x14+x13,18); } x0 += j0; x1 += j1; x2 += j2; x3 += j3; x4 += j4; x5 += j5; x6 += j6; x7 += j7; x8 += j8; x9 += j9; x10 += j10; x11 += j11; x12 += j12; x13 += j13; x14 += j14; x15 += j15; store_littleendian(out + 0,x0); store_littleendian(out + 4,x1); store_littleendian(out + 8,x2); store_littleendian(out + 12,x3); store_littleendian(out + 16,x4); store_littleendian(out + 20,x5); store_littleendian(out + 24,x6); store_littleendian(out + 28,x7); store_littleendian(out + 32,x8); store_littleendian(out + 36,x9); store_littleendian(out + 40,x10); store_littleendian(out + 44,x11); store_littleendian(out + 48,x12); store_littleendian(out + 52,x13); store_littleendian(out + 56,x14); store_littleendian(out + 60,x15); return 0; } curvedns-curvedns-0.87/nacl/crypto_core/salsa20/used000066400000000000000000000000001150631715100225130ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/salsa2012/000077500000000000000000000000001150631715100220055ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/salsa2012/checksum000066400000000000000000000002011150631715100235230ustar00rootroot00000000000000f36d643f798efc0fca888d3ac4bdcc54c98a968c2da16bd5b8bfe9fe9025a6ca3a207e9362dc7cf17ddfc7477ee754d3f521b1df91640093754f7275b1a54293 curvedns-curvedns-0.87/nacl/crypto_core/salsa2012/ref/000077500000000000000000000000001150631715100225615ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/salsa2012/ref/api.h000066400000000000000000000001631150631715100235030ustar00rootroot00000000000000#define CRYPTO_OUTPUTBYTES 64 #define CRYPTO_INPUTBYTES 16 #define CRYPTO_KEYBYTES 32 #define CRYPTO_CONSTBYTES 16 curvedns-curvedns-0.87/nacl/crypto_core/salsa2012/ref/core.c000066400000000000000000000063171150631715100236640ustar00rootroot00000000000000/* version 20080913 D. J. Bernstein Public domain. 
*/ #include "crypto_core.h" #define ROUNDS 12 typedef unsigned int uint32; static uint32 rotate(uint32 u,int c) { return (u << c) | (u >> (32 - c)); } static uint32 load_littleendian(const unsigned char *x) { return (uint32) (x[0]) \ | (((uint32) (x[1])) << 8) \ | (((uint32) (x[2])) << 16) \ | (((uint32) (x[3])) << 24) ; } static void store_littleendian(unsigned char *x,uint32 u) { x[0] = u; u >>= 8; x[1] = u; u >>= 8; x[2] = u; u >>= 8; x[3] = u; } int crypto_core( unsigned char *out, const unsigned char *in, const unsigned char *k, const unsigned char *c ) { uint32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15; uint32 j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15; int i; j0 = x0 = load_littleendian(c + 0); j1 = x1 = load_littleendian(k + 0); j2 = x2 = load_littleendian(k + 4); j3 = x3 = load_littleendian(k + 8); j4 = x4 = load_littleendian(k + 12); j5 = x5 = load_littleendian(c + 4); j6 = x6 = load_littleendian(in + 0); j7 = x7 = load_littleendian(in + 4); j8 = x8 = load_littleendian(in + 8); j9 = x9 = load_littleendian(in + 12); j10 = x10 = load_littleendian(c + 8); j11 = x11 = load_littleendian(k + 16); j12 = x12 = load_littleendian(k + 20); j13 = x13 = load_littleendian(k + 24); j14 = x14 = load_littleendian(k + 28); j15 = x15 = load_littleendian(c + 12); for (i = ROUNDS;i > 0;i -= 2) { x4 ^= rotate( x0+x12, 7); x8 ^= rotate( x4+ x0, 9); x12 ^= rotate( x8+ x4,13); x0 ^= rotate(x12+ x8,18); x9 ^= rotate( x5+ x1, 7); x13 ^= rotate( x9+ x5, 9); x1 ^= rotate(x13+ x9,13); x5 ^= rotate( x1+x13,18); x14 ^= rotate(x10+ x6, 7); x2 ^= rotate(x14+x10, 9); x6 ^= rotate( x2+x14,13); x10 ^= rotate( x6+ x2,18); x3 ^= rotate(x15+x11, 7); x7 ^= rotate( x3+x15, 9); x11 ^= rotate( x7+ x3,13); x15 ^= rotate(x11+ x7,18); x1 ^= rotate( x0+ x3, 7); x2 ^= rotate( x1+ x0, 9); x3 ^= rotate( x2+ x1,13); x0 ^= rotate( x3+ x2,18); x6 ^= rotate( x5+ x4, 7); x7 ^= rotate( x6+ x5, 9); x4 ^= rotate( x7+ x6,13); x5 ^= rotate( x4+ x7,18); x11 ^= rotate(x10+ x9, 7); x8 ^= rotate(x11+x10, 9); x9 ^= rotate( x8+x11,13); x10 ^= rotate( x9+ x8,18); x12 ^= rotate(x15+x14, 7); x13 ^= rotate(x12+x15, 9); x14 ^= rotate(x13+x12,13); x15 ^= rotate(x14+x13,18); } x0 += j0; x1 += j1; x2 += j2; x3 += j3; x4 += j4; x5 += j5; x6 += j6; x7 += j7; x8 += j8; x9 += j9; x10 += j10; x11 += j11; x12 += j12; x13 += j13; x14 += j14; x15 += j15; store_littleendian(out + 0,x0); store_littleendian(out + 4,x1); store_littleendian(out + 8,x2); store_littleendian(out + 12,x3); store_littleendian(out + 16,x4); store_littleendian(out + 20,x5); store_littleendian(out + 24,x6); store_littleendian(out + 28,x7); store_littleendian(out + 32,x8); store_littleendian(out + 36,x9); store_littleendian(out + 40,x10); store_littleendian(out + 44,x11); store_littleendian(out + 48,x12); store_littleendian(out + 52,x13); store_littleendian(out + 56,x14); store_littleendian(out + 60,x15); return 0; } curvedns-curvedns-0.87/nacl/crypto_core/salsa2012/used000066400000000000000000000000001150631715100226560ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/salsa208/000077500000000000000000000000001150631715100217325ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/salsa208/checksum000066400000000000000000000002011150631715100234500ustar00rootroot000000000000001e13ea9e74cb36989f7cbf4abc80b29154e1a8b150bd5244951318abea002a93ae9fe2abbcf7217526ac2a85b66c256ba9374b1257eda0c01816da328edfa11a 
curvedns-curvedns-0.87/nacl/crypto_core/salsa208/ref/000077500000000000000000000000001150631715100225065ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/salsa208/ref/api.h000066400000000000000000000001631150631715100234300ustar00rootroot00000000000000#define CRYPTO_OUTPUTBYTES 64 #define CRYPTO_INPUTBYTES 16 #define CRYPTO_KEYBYTES 32 #define CRYPTO_CONSTBYTES 16 curvedns-curvedns-0.87/nacl/crypto_core/salsa208/ref/core.c000066400000000000000000000063161150631715100236100ustar00rootroot00000000000000/* version 20080913 D. J. Bernstein Public domain. */ #include "crypto_core.h" #define ROUNDS 8 typedef unsigned int uint32; static uint32 rotate(uint32 u,int c) { return (u << c) | (u >> (32 - c)); } static uint32 load_littleendian(const unsigned char *x) { return (uint32) (x[0]) \ | (((uint32) (x[1])) << 8) \ | (((uint32) (x[2])) << 16) \ | (((uint32) (x[3])) << 24) ; } static void store_littleendian(unsigned char *x,uint32 u) { x[0] = u; u >>= 8; x[1] = u; u >>= 8; x[2] = u; u >>= 8; x[3] = u; } int crypto_core( unsigned char *out, const unsigned char *in, const unsigned char *k, const unsigned char *c ) { uint32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15; uint32 j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15; int i; j0 = x0 = load_littleendian(c + 0); j1 = x1 = load_littleendian(k + 0); j2 = x2 = load_littleendian(k + 4); j3 = x3 = load_littleendian(k + 8); j4 = x4 = load_littleendian(k + 12); j5 = x5 = load_littleendian(c + 4); j6 = x6 = load_littleendian(in + 0); j7 = x7 = load_littleendian(in + 4); j8 = x8 = load_littleendian(in + 8); j9 = x9 = load_littleendian(in + 12); j10 = x10 = load_littleendian(c + 8); j11 = x11 = load_littleendian(k + 16); j12 = x12 = load_littleendian(k + 20); j13 = x13 = load_littleendian(k + 24); j14 = x14 = load_littleendian(k + 28); j15 = x15 = load_littleendian(c + 12); for (i = ROUNDS;i > 0;i -= 2) { x4 ^= rotate( x0+x12, 7); x8 ^= rotate( x4+ x0, 9); x12 ^= rotate( x8+ x4,13); x0 ^= rotate(x12+ x8,18); x9 ^= rotate( x5+ x1, 7); x13 ^= rotate( x9+ x5, 9); x1 ^= rotate(x13+ x9,13); x5 ^= rotate( x1+x13,18); x14 ^= rotate(x10+ x6, 7); x2 ^= rotate(x14+x10, 9); x6 ^= rotate( x2+x14,13); x10 ^= rotate( x6+ x2,18); x3 ^= rotate(x15+x11, 7); x7 ^= rotate( x3+x15, 9); x11 ^= rotate( x7+ x3,13); x15 ^= rotate(x11+ x7,18); x1 ^= rotate( x0+ x3, 7); x2 ^= rotate( x1+ x0, 9); x3 ^= rotate( x2+ x1,13); x0 ^= rotate( x3+ x2,18); x6 ^= rotate( x5+ x4, 7); x7 ^= rotate( x6+ x5, 9); x4 ^= rotate( x7+ x6,13); x5 ^= rotate( x4+ x7,18); x11 ^= rotate(x10+ x9, 7); x8 ^= rotate(x11+x10, 9); x9 ^= rotate( x8+x11,13); x10 ^= rotate( x9+ x8,18); x12 ^= rotate(x15+x14, 7); x13 ^= rotate(x12+x15, 9); x14 ^= rotate(x13+x12,13); x15 ^= rotate(x14+x13,18); } x0 += j0; x1 += j1; x2 += j2; x3 += j3; x4 += j4; x5 += j5; x6 += j6; x7 += j7; x8 += j8; x9 += j9; x10 += j10; x11 += j11; x12 += j12; x13 += j13; x14 += j14; x15 += j15; store_littleendian(out + 0,x0); store_littleendian(out + 4,x1); store_littleendian(out + 8,x2); store_littleendian(out + 12,x3); store_littleendian(out + 16,x4); store_littleendian(out + 20,x5); store_littleendian(out + 24,x6); store_littleendian(out + 28,x7); store_littleendian(out + 32,x8); store_littleendian(out + 36,x9); store_littleendian(out + 40,x10); store_littleendian(out + 44,x11); store_littleendian(out + 48,x12); store_littleendian(out + 52,x13); store_littleendian(out + 56,x14); store_littleendian(out + 60,x15); return 0; } 
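The salsa20, salsa2012 and salsa208 cores in this directory are identical apart from the ROUNDS constant (20, 12 and 8 respectively); each maps a 32-byte key, a 16-byte input and a 16-byte constant to one 64-byte output block. A sketch of producing a single keystream block the way the stream ciphers drive this core, assuming the generated crypto_core_salsa20.h header; the all-zero key and input are placeholders, not test vectors:

#include <stdio.h>
#include "crypto_core_salsa20.h"

static const unsigned char sigma[16] = "expand 32-byte k";

int main(void)
{
  unsigned char out[64];
  unsigned char in[16] = {0};   /* 8-byte nonce || 8-byte little-endian block counter */
  unsigned char k[32] = {0};    /* placeholder key */
  int i;

  crypto_core_salsa20(out, in, k, sigma);   /* one 64-byte keystream block */
  for (i = 0; i < 64; ++i) printf("%02x", out[i]);
  printf("\n");
  return 0;
}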
curvedns-curvedns-0.87/nacl/crypto_core/salsa208/used000066400000000000000000000000001150631715100226030ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_core/try.c000066400000000000000000000073731150631715100213710ustar00rootroot00000000000000/* * crypto_core/try.c version 20090118 * D. J. Bernstein * Public domain. */ #include #include "crypto_core.h" extern unsigned char *alignedcalloc(unsigned long long); const char *primitiveimplementation = crypto_core_IMPLEMENTATION; static unsigned char *h; static unsigned char *n; static unsigned char *k; static unsigned char *c; static unsigned char *h2; static unsigned char *n2; static unsigned char *k2; static unsigned char *c2; #define hlen crypto_core_OUTPUTBYTES #define nlen crypto_core_INPUTBYTES #define klen crypto_core_KEYBYTES #define clen crypto_core_CONSTBYTES void preallocate(void) { } void allocate(void) { h = alignedcalloc(hlen); n = alignedcalloc(nlen); k = alignedcalloc(klen); c = alignedcalloc(clen); h2 = alignedcalloc(hlen); n2 = alignedcalloc(nlen + crypto_core_OUTPUTBYTES); k2 = alignedcalloc(klen + crypto_core_OUTPUTBYTES); c2 = alignedcalloc(clen + crypto_core_OUTPUTBYTES); } void predoit(void) { } void doit(void) { crypto_core(h,n,k,c); } static unsigned char newbyte(void) { unsigned long long x; long long j; x = 8675309; for (j = 0;j < hlen;++j) { x += h[j]; x *= x; x += (x >> 31); } for (j = 0;j < nlen;++j) { x += n[j]; x *= x; x += (x >> 31); } for (j = 0;j < klen;++j) { x += k[j]; x *= x; x += (x >> 31); } for (j = 0;j < clen;++j) { x += c[j]; x *= x; x += (x >> 31); } for (j = 0;j < 100;++j) { x += j ; x *= x; x += (x >> 31); } return x; } char checksum[hlen * 2 + 1]; const char *checksum_compute(void) { long long i; long long j; for (i = 0;i < 100;++i) { for (j = -16;j < 0;++j) h[j] = random(); for (j = hlen;j < hlen + 16;++j) h[j] = random(); for (j = -16;j < hlen + 16;++j) h2[j] = h[j]; for (j = -16;j < 0;++j) n[j] = random(); for (j = nlen;j < nlen + 16;++j) n[j] = random(); for (j = -16;j < nlen + 16;++j) n2[j] = n[j]; for (j = -16;j < 0;++j) k[j] = random(); for (j = klen;j < klen + 16;++j) k[j] = random(); for (j = -16;j < klen + 16;++j) k2[j] = k[j]; for (j = -16;j < 0;++j) c[j] = random(); for (j = clen;j < clen + 16;++j) c[j] = random(); for (j = -16;j < clen + 16;++j) c2[j] = c[j]; if (crypto_core(h,n,k,c) != 0) return "crypto_core returns nonzero"; for (j = -16;j < 0;++j) if (h2[j] != h[j]) return "crypto_core writes before output"; for (j = hlen;j < hlen + 16;++j) if (h2[j] != h[j]) return "crypto_core writes after output"; for (j = -16;j < klen + 16;++j) if (k2[j] != k[j]) return "crypto_core writes to k"; for (j = -16;j < nlen + 16;++j) if (n2[j] != n[j]) return "crypto_core writes to n"; for (j = -16;j < clen + 16;++j) if (c2[j] != c[j]) return "crypto_core writes to c"; if (crypto_core(n2,n2,k,c) != 0) return "crypto_core returns nonzero"; for (j = 0;j < hlen;++j) if (h[j] != n2[j]) return "crypto_core does not handle n overlap"; for (j = 0;j < hlen;++j) n2[j] = n[j]; if (crypto_core(k2,n2,k2,c) != 0) return "crypto_core returns nonzero"; for (j = 0;j < hlen;++j) if (h[j] != k2[j]) return "crypto_core does not handle k overlap"; for (j = 0;j < hlen;++j) k2[j] = k[j]; if (crypto_core(c2,n2,k2,c2) != 0) return "crypto_core returns nonzero"; for (j = 0;j < hlen;++j) if (h[j] != c2[j]) return "crypto_core does not handle c overlap"; for (j = 0;j < hlen;++j) c2[j] = c[j]; for (j = 0;j < nlen;++j) n[j] = newbyte(); if (crypto_core(h,n,k,c) != 0) return "crypto_core returns nonzero"; for (j = 
0;j < klen;++j) k[j] = newbyte(); if (crypto_core(h,n,k,c) != 0) return "crypto_core returns nonzero"; for (j = 0;j < clen;++j) c[j] = newbyte(); } for (i = 0;i < hlen;++i) { checksum[2 * i] = "0123456789abcdef"[15 & (h[i] >> 4)]; checksum[2 * i + 1] = "0123456789abcdef"[15 & h[i]]; } checksum[2 * i] = 0; return 0; } curvedns-curvedns-0.87/nacl/crypto_core/wrapper-empty.cpp000066400000000000000000000000001150631715100237030ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hash/000077500000000000000000000000001150631715100203705ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hash/measure.c000066400000000000000000000026761150631715100222100ustar00rootroot00000000000000#include #include "randombytes.h" #include "cpucycles.h" #include "crypto_hash.h" extern void printentry(long long,const char *,long long *,long long); extern unsigned char *alignedcalloc(unsigned long long); extern const char *primitiveimplementation; extern const char *implementationversion; extern const char *sizenames[]; extern const long long sizes[]; extern void allocate(void); extern void measure(void); const char *primitiveimplementation = crypto_hash_IMPLEMENTATION; const char *implementationversion = crypto_hash_VERSION; const char *sizenames[] = { "outputbytes", 0 }; const long long sizes[] = { crypto_hash_BYTES }; #define MAXTEST_BYTES 4096 #ifdef SUPERCOP #define MGAP 8192 #else #define MGAP 8 #endif static unsigned char *h; static unsigned char *m; void preallocate(void) { } void allocate(void) { h = alignedcalloc(crypto_hash_BYTES); m = alignedcalloc(MAXTEST_BYTES); } #define TIMINGS 15 static long long cycles[TIMINGS + 1]; static void printcycles(long long mlen) { int i; for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"cycles",cycles,TIMINGS); } void measure(void) { int i; int loop; int mlen; for (loop = 0;loop < LOOPS;++loop) { for (mlen = 0;mlen <= MAXTEST_BYTES;mlen += 1 + mlen / MGAP) { randombytes(m,mlen); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_hash(h,m,mlen); } printcycles(mlen); } } } curvedns-curvedns-0.87/nacl/crypto_hash/sha256/000077500000000000000000000000001150631715100214005ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hash/sha256/checksum000066400000000000000000000001011150631715100231150ustar00rootroot0000000000000086df8bd202b2a2b5fdc04a7f50a591e43a345849c12fef08d487109648a08e05 curvedns-curvedns-0.87/nacl/crypto_hash/sha256/ref/000077500000000000000000000000001150631715100221545ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hash/sha256/ref/api.h000066400000000000000000000000301150631715100230670ustar00rootroot00000000000000#define CRYPTO_BYTES 32 curvedns-curvedns-0.87/nacl/crypto_hash/sha256/ref/hash.c000066400000000000000000000026761150631715100232560ustar00rootroot00000000000000/* 20080913 D. J. Bernstein Public domain. 
*/ #include "crypto_hashblocks_sha256.h" #include "crypto_hash.h" #define blocks crypto_hashblocks_sha256 typedef unsigned int uint32; static const char iv[32] = { 0x6a,0x09,0xe6,0x67, 0xbb,0x67,0xae,0x85, 0x3c,0x6e,0xf3,0x72, 0xa5,0x4f,0xf5,0x3a, 0x51,0x0e,0x52,0x7f, 0x9b,0x05,0x68,0x8c, 0x1f,0x83,0xd9,0xab, 0x5b,0xe0,0xcd,0x19, } ; int crypto_hash(unsigned char *out,const unsigned char *in,unsigned long long inlen) { unsigned char h[32]; unsigned char padded[128]; int i; unsigned long long bits = inlen << 3; for (i = 0;i < 32;++i) h[i] = iv[i]; blocks(h,in,inlen); in += inlen; inlen &= 63; in -= inlen; for (i = 0;i < inlen;++i) padded[i] = in[i]; padded[inlen] = 0x80; if (inlen < 56) { for (i = inlen + 1;i < 56;++i) padded[i] = 0; padded[56] = bits >> 56; padded[57] = bits >> 48; padded[58] = bits >> 40; padded[59] = bits >> 32; padded[60] = bits >> 24; padded[61] = bits >> 16; padded[62] = bits >> 8; padded[63] = bits; blocks(h,padded,64); } else { for (i = inlen + 1;i < 120;++i) padded[i] = 0; padded[120] = bits >> 56; padded[121] = bits >> 48; padded[122] = bits >> 40; padded[123] = bits >> 32; padded[124] = bits >> 24; padded[125] = bits >> 16; padded[126] = bits >> 8; padded[127] = bits; blocks(h,padded,128); } for (i = 0;i < 32;++i) out[i] = h[i]; return 0; } curvedns-curvedns-0.87/nacl/crypto_hash/sha256/used000066400000000000000000000000001150631715100222510ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hash/sha512/000077500000000000000000000000001150631715100213735ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hash/sha512/checksum000066400000000000000000000002011150631715100231110ustar00rootroot000000000000009a2a989e136a02c3362c98e6e1e0b52fab980a1dafbebe4dd5e44d15d061742e35fb686befd4e33c608d251c96e26c020f90d92bb7ec8a657f79bb8e0b00a473 curvedns-curvedns-0.87/nacl/crypto_hash/sha512/ref/000077500000000000000000000000001150631715100221475ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hash/sha512/ref/api.h000066400000000000000000000000301150631715100230620ustar00rootroot00000000000000#define CRYPTO_BYTES 64 curvedns-curvedns-0.87/nacl/crypto_hash/sha512/ref/hash.c000066400000000000000000000033141150631715100232370ustar00rootroot00000000000000/* 20080913 D. J. Bernstein Public domain. 
*/ #include "crypto_hashblocks_sha512.h" #include "crypto_hash.h" #define blocks crypto_hashblocks_sha512 static const unsigned char iv[64] = { 0x6a,0x09,0xe6,0x67,0xf3,0xbc,0xc9,0x08, 0xbb,0x67,0xae,0x85,0x84,0xca,0xa7,0x3b, 0x3c,0x6e,0xf3,0x72,0xfe,0x94,0xf8,0x2b, 0xa5,0x4f,0xf5,0x3a,0x5f,0x1d,0x36,0xf1, 0x51,0x0e,0x52,0x7f,0xad,0xe6,0x82,0xd1, 0x9b,0x05,0x68,0x8c,0x2b,0x3e,0x6c,0x1f, 0x1f,0x83,0xd9,0xab,0xfb,0x41,0xbd,0x6b, 0x5b,0xe0,0xcd,0x19,0x13,0x7e,0x21,0x79 } ; typedef unsigned long long uint64; int crypto_hash(unsigned char *out,const unsigned char *in,unsigned long long inlen) { unsigned char h[64]; unsigned char padded[256]; int i; unsigned long long bytes = inlen; for (i = 0;i < 64;++i) h[i] = iv[i]; blocks(h,in,inlen); in += inlen; inlen &= 127; in -= inlen; for (i = 0;i < inlen;++i) padded[i] = in[i]; padded[inlen] = 0x80; if (inlen < 112) { for (i = inlen + 1;i < 119;++i) padded[i] = 0; padded[119] = bytes >> 61; padded[120] = bytes >> 53; padded[121] = bytes >> 45; padded[122] = bytes >> 37; padded[123] = bytes >> 29; padded[124] = bytes >> 21; padded[125] = bytes >> 13; padded[126] = bytes >> 5; padded[127] = bytes << 3; blocks(h,padded,128); } else { for (i = inlen + 1;i < 247;++i) padded[i] = 0; padded[247] = bytes >> 61; padded[248] = bytes >> 53; padded[249] = bytes >> 45; padded[250] = bytes >> 37; padded[251] = bytes >> 29; padded[252] = bytes >> 21; padded[253] = bytes >> 13; padded[254] = bytes >> 5; padded[255] = bytes << 3; blocks(h,padded,256); } for (i = 0;i < 64;++i) out[i] = h[i]; return 0; } curvedns-curvedns-0.87/nacl/crypto_hash/sha512/selected000066400000000000000000000000001150631715100230740ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hash/sha512/used000066400000000000000000000000001150631715100222440ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hash/try.c000066400000000000000000000041311150631715100213510ustar00rootroot00000000000000/* * crypto_hash/try.c version 20090118 * D. J. Bernstein * Public domain. 
*/ #include #include "crypto_hash.h" extern unsigned char *alignedcalloc(unsigned long long); const char *primitiveimplementation = crypto_hash_IMPLEMENTATION; #define MAXTEST_BYTES (10000 + crypto_hash_BYTES) #define CHECKSUM_BYTES 4096 #define TUNE_BYTES 1536 static unsigned char *h; static unsigned char *h2; static unsigned char *m; static unsigned char *m2; void preallocate(void) { } void allocate(void) { h = alignedcalloc(crypto_hash_BYTES); h2 = alignedcalloc(crypto_hash_BYTES); m = alignedcalloc(MAXTEST_BYTES); m2 = alignedcalloc(MAXTEST_BYTES); } void predoit(void) { } void doit(void) { crypto_hash(h,m,TUNE_BYTES); } char checksum[crypto_hash_BYTES * 2 + 1]; const char *checksum_compute(void) { long long i; long long j; for (i = 0;i < CHECKSUM_BYTES;++i) { long long hlen = crypto_hash_BYTES; long long mlen = i; for (j = -16;j < 0;++j) h[j] = random(); for (j = hlen;j < hlen + 16;++j) h[j] = random(); for (j = -16;j < hlen + 16;++j) h2[j] = h[j]; for (j = -16;j < 0;++j) m[j] = random(); for (j = mlen;j < mlen + 16;++j) m[j] = random(); for (j = -16;j < mlen + 16;++j) m2[j] = m[j]; if (crypto_hash(h,m,mlen) != 0) return "crypto_hash returns nonzero"; for (j = -16;j < mlen + 16;++j) if (m2[j] != m[j]) return "crypto_hash writes to input"; for (j = -16;j < 0;++j) if (h2[j] != h[j]) return "crypto_hash writes before output"; for (j = hlen;j < hlen + 16;++j) if (h2[j] != h[j]) return "crypto_hash writes after output"; if (crypto_hash(m2,m2,mlen) != 0) return "crypto_hash returns nonzero"; for (j = 0;j < hlen;++j) if (m2[j] != h[j]) return "crypto_hash does not handle overlap"; for (j = 0;j < mlen;++j) m[j] ^= h[j % hlen]; m[mlen] = h[0]; } if (crypto_hash(h,m,CHECKSUM_BYTES) != 0) return "crypto_hash returns nonzero"; for (i = 0;i < crypto_hash_BYTES;++i) { checksum[2 * i] = "0123456789abcdef"[15 & (h[i] >> 4)]; checksum[2 * i + 1] = "0123456789abcdef"[15 & h[i]]; } checksum[2 * i] = 0; return 0; } curvedns-curvedns-0.87/nacl/crypto_hash/wrapper-hash.cpp000066400000000000000000000003601150631715100234740ustar00rootroot00000000000000#include using std::string; #include "crypto_hash.h" string crypto_hash(const string &m) { unsigned char h[crypto_hash_BYTES]; crypto_hash(h,(const unsigned char *) m.c_str(),m.size()); return string((char *) h,sizeof h); } curvedns-curvedns-0.87/nacl/crypto_hashblocks/000077500000000000000000000000001150631715100215665ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hashblocks/measure.c000066400000000000000000000005371150631715100234000ustar00rootroot00000000000000#include "crypto_hashblocks.h" const char *primitiveimplementation = crypto_hashblocks_IMPLEMENTATION; const char *implementationversion = crypto_hashblocks_VERSION; const char *sizenames[] = { "statebytes", 0 }; const long long sizes[] = { crypto_hashblocks_STATEBYTES }; void preallocate(void) { } void allocate(void) { } void measure(void) { } curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha256/000077500000000000000000000000001150631715100225765ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha256/checksum000066400000000000000000000001011150631715100243130ustar00rootroot0000000000000069a9dc2464f9593161e462d3dbb634b84f1d68d67d26df29aaa805f9dcd8f656 curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha256/inplace/000077500000000000000000000000001150631715100242115ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha256/inplace/api.h000066400000000000000000000000721150631715100251320ustar00rootroot00000000000000#define CRYPTO_STATEBYTES 
32 #define CRYPTO_BLOCKBYTES 64 curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha256/inplace/blocks.c000066400000000000000000000123041150631715100256320ustar00rootroot00000000000000#include "crypto_hashblocks.h" typedef unsigned int uint32; static uint32 load_bigendian(const unsigned char *x) { return (uint32) (x[3]) \ | (((uint32) (x[2])) << 8) \ | (((uint32) (x[1])) << 16) \ | (((uint32) (x[0])) << 24) ; } static void store_bigendian(unsigned char *x,uint32 u) { x[3] = u; u >>= 8; x[2] = u; u >>= 8; x[1] = u; u >>= 8; x[0] = u; } #define SHR(x,c) ((x) >> (c)) #define ROTR(x,c) (((x) >> (c)) | ((x) << (32 - (c)))) #define Ch(x,y,z) ((x & y) ^ (~x & z)) #define Maj(x,y,z) ((x & y) ^ (x & z) ^ (y & z)) #define Sigma0(x) (ROTR(x, 2) ^ ROTR(x,13) ^ ROTR(x,22)) #define Sigma1(x) (ROTR(x, 6) ^ ROTR(x,11) ^ ROTR(x,25)) #define sigma0(x) (ROTR(x, 7) ^ ROTR(x,18) ^ SHR(x, 3)) #define sigma1(x) (ROTR(x,17) ^ ROTR(x,19) ^ SHR(x,10)) #define M(w0,w14,w9,w1) w0 += sigma1(w14) + w9 + sigma0(w1); #define EXPAND \ M(w0 ,w14,w9 ,w1 ) \ M(w1 ,w15,w10,w2 ) \ M(w2 ,w0 ,w11,w3 ) \ M(w3 ,w1 ,w12,w4 ) \ M(w4 ,w2 ,w13,w5 ) \ M(w5 ,w3 ,w14,w6 ) \ M(w6 ,w4 ,w15,w7 ) \ M(w7 ,w5 ,w0 ,w8 ) \ M(w8 ,w6 ,w1 ,w9 ) \ M(w9 ,w7 ,w2 ,w10) \ M(w10,w8 ,w3 ,w11) \ M(w11,w9 ,w4 ,w12) \ M(w12,w10,w5 ,w13) \ M(w13,w11,w6 ,w14) \ M(w14,w12,w7 ,w15) \ M(w15,w13,w8 ,w0 ) #define F(r0,r1,r2,r3,r4,r5,r6,r7,w,k) \ r7 += Sigma1(r4) + Ch(r4,r5,r6) + k + w; \ r3 += r7; \ r7 += Sigma0(r0) + Maj(r0,r1,r2); #define G(r0,r1,r2,r3,r4,r5,r6,r7,i) \ F(r0,r1,r2,r3,r4,r5,r6,r7,w0 ,round[i + 0]) \ F(r7,r0,r1,r2,r3,r4,r5,r6,w1 ,round[i + 1]) \ F(r6,r7,r0,r1,r2,r3,r4,r5,w2 ,round[i + 2]) \ F(r5,r6,r7,r0,r1,r2,r3,r4,w3 ,round[i + 3]) \ F(r4,r5,r6,r7,r0,r1,r2,r3,w4 ,round[i + 4]) \ F(r3,r4,r5,r6,r7,r0,r1,r2,w5 ,round[i + 5]) \ F(r2,r3,r4,r5,r6,r7,r0,r1,w6 ,round[i + 6]) \ F(r1,r2,r3,r4,r5,r6,r7,r0,w7 ,round[i + 7]) \ F(r0,r1,r2,r3,r4,r5,r6,r7,w8 ,round[i + 8]) \ F(r7,r0,r1,r2,r3,r4,r5,r6,w9 ,round[i + 9]) \ F(r6,r7,r0,r1,r2,r3,r4,r5,w10,round[i + 10]) \ F(r5,r6,r7,r0,r1,r2,r3,r4,w11,round[i + 11]) \ F(r4,r5,r6,r7,r0,r1,r2,r3,w12,round[i + 12]) \ F(r3,r4,r5,r6,r7,r0,r1,r2,w13,round[i + 13]) \ F(r2,r3,r4,r5,r6,r7,r0,r1,w14,round[i + 14]) \ F(r1,r2,r3,r4,r5,r6,r7,r0,w15,round[i + 15]) static const uint32 round[64] = { 0x428a2f98 , 0x71374491 , 0xb5c0fbcf , 0xe9b5dba5 , 0x3956c25b , 0x59f111f1 , 0x923f82a4 , 0xab1c5ed5 , 0xd807aa98 , 0x12835b01 , 0x243185be , 0x550c7dc3 , 0x72be5d74 , 0x80deb1fe , 0x9bdc06a7 , 0xc19bf174 , 0xe49b69c1 , 0xefbe4786 , 0x0fc19dc6 , 0x240ca1cc , 0x2de92c6f , 0x4a7484aa , 0x5cb0a9dc , 0x76f988da , 0x983e5152 , 0xa831c66d , 0xb00327c8 , 0xbf597fc7 , 0xc6e00bf3 , 0xd5a79147 , 0x06ca6351 , 0x14292967 , 0x27b70a85 , 0x2e1b2138 , 0x4d2c6dfc , 0x53380d13 , 0x650a7354 , 0x766a0abb , 0x81c2c92e , 0x92722c85 , 0xa2bfe8a1 , 0xa81a664b , 0xc24b8b70 , 0xc76c51a3 , 0xd192e819 , 0xd6990624 , 0xf40e3585 , 0x106aa070 , 0x19a4c116 , 0x1e376c08 , 0x2748774c , 0x34b0bcb5 , 0x391c0cb3 , 0x4ed8aa4a , 0x5b9cca4f , 0x682e6ff3 , 0x748f82ee , 0x78a5636f , 0x84c87814 , 0x8cc70208 , 0x90befffa , 0xa4506ceb , 0xbef9a3f7 , 0xc67178f2 } ; int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen) { uint32 state[8]; uint32 r0; uint32 r1; uint32 r2; uint32 r3; uint32 r4; uint32 r5; uint32 r6; uint32 r7; r0 = load_bigendian(statebytes + 0); state[0] = r0; r1 = load_bigendian(statebytes + 4); state[1] = r1; r2 = load_bigendian(statebytes + 8); state[2] = r2; r3 = load_bigendian(statebytes + 12); state[3] = r3; r4 = 
load_bigendian(statebytes + 16); state[4] = r4; r5 = load_bigendian(statebytes + 20); state[5] = r5; r6 = load_bigendian(statebytes + 24); state[6] = r6; r7 = load_bigendian(statebytes + 28); state[7] = r7; while (inlen >= 64) { uint32 w0 = load_bigendian(in + 0); uint32 w1 = load_bigendian(in + 4); uint32 w2 = load_bigendian(in + 8); uint32 w3 = load_bigendian(in + 12); uint32 w4 = load_bigendian(in + 16); uint32 w5 = load_bigendian(in + 20); uint32 w6 = load_bigendian(in + 24); uint32 w7 = load_bigendian(in + 28); uint32 w8 = load_bigendian(in + 32); uint32 w9 = load_bigendian(in + 36); uint32 w10 = load_bigendian(in + 40); uint32 w11 = load_bigendian(in + 44); uint32 w12 = load_bigendian(in + 48); uint32 w13 = load_bigendian(in + 52); uint32 w14 = load_bigendian(in + 56); uint32 w15 = load_bigendian(in + 60); G(r0,r1,r2,r3,r4,r5,r6,r7,0) EXPAND G(r0,r1,r2,r3,r4,r5,r6,r7,16) EXPAND G(r0,r1,r2,r3,r4,r5,r6,r7,32) EXPAND G(r0,r1,r2,r3,r4,r5,r6,r7,48) r0 += state[0]; r1 += state[1]; r2 += state[2]; r3 += state[3]; r4 += state[4]; r5 += state[5]; r6 += state[6]; r7 += state[7]; state[0] = r0; state[1] = r1; state[2] = r2; state[3] = r3; state[4] = r4; state[5] = r5; state[6] = r6; state[7] = r7; in += 64; inlen -= 64; } store_bigendian(statebytes + 0,state[0]); store_bigendian(statebytes + 4,state[1]); store_bigendian(statebytes + 8,state[2]); store_bigendian(statebytes + 12,state[3]); store_bigendian(statebytes + 16,state[4]); store_bigendian(statebytes + 20,state[5]); store_bigendian(statebytes + 24,state[6]); store_bigendian(statebytes + 28,state[7]); return 0; } curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha256/ref/000077500000000000000000000000001150631715100233525ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha256/ref/api.h000066400000000000000000000000721150631715100242730ustar00rootroot00000000000000#define CRYPTO_STATEBYTES 32 #define CRYPTO_BLOCKBYTES 64 curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha256/ref/blocks.c000066400000000000000000000115271150631715100250010ustar00rootroot00000000000000#include "crypto_hashblocks.h" typedef unsigned int uint32; static uint32 load_bigendian(const unsigned char *x) { return (uint32) (x[3]) \ | (((uint32) (x[2])) << 8) \ | (((uint32) (x[1])) << 16) \ | (((uint32) (x[0])) << 24) ; } static void store_bigendian(unsigned char *x,uint32 u) { x[3] = u; u >>= 8; x[2] = u; u >>= 8; x[1] = u; u >>= 8; x[0] = u; } #define SHR(x,c) ((x) >> (c)) #define ROTR(x,c) (((x) >> (c)) | ((x) << (32 - (c)))) #define Ch(x,y,z) ((x & y) ^ (~x & z)) #define Maj(x,y,z) ((x & y) ^ (x & z) ^ (y & z)) #define Sigma0(x) (ROTR(x, 2) ^ ROTR(x,13) ^ ROTR(x,22)) #define Sigma1(x) (ROTR(x, 6) ^ ROTR(x,11) ^ ROTR(x,25)) #define sigma0(x) (ROTR(x, 7) ^ ROTR(x,18) ^ SHR(x, 3)) #define sigma1(x) (ROTR(x,17) ^ ROTR(x,19) ^ SHR(x,10)) #define M(w0,w14,w9,w1) w0 = sigma1(w14) + w9 + sigma0(w1) + w0; #define EXPAND \ M(w0 ,w14,w9 ,w1 ) \ M(w1 ,w15,w10,w2 ) \ M(w2 ,w0 ,w11,w3 ) \ M(w3 ,w1 ,w12,w4 ) \ M(w4 ,w2 ,w13,w5 ) \ M(w5 ,w3 ,w14,w6 ) \ M(w6 ,w4 ,w15,w7 ) \ M(w7 ,w5 ,w0 ,w8 ) \ M(w8 ,w6 ,w1 ,w9 ) \ M(w9 ,w7 ,w2 ,w10) \ M(w10,w8 ,w3 ,w11) \ M(w11,w9 ,w4 ,w12) \ M(w12,w10,w5 ,w13) \ M(w13,w11,w6 ,w14) \ M(w14,w12,w7 ,w15) \ M(w15,w13,w8 ,w0 ) #define F(w,k) \ T1 = h + Sigma1(e) + Ch(e,f,g) + k + w; \ T2 = Sigma0(a) + Maj(a,b,c); \ h = g; \ g = f; \ f = e; \ e = d + T1; \ d = c; \ c = b; \ b = a; \ a = T1 + T2; int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen) { uint32 state[8]; uint32 a; uint32 b; 
uint32 c; uint32 d; uint32 e; uint32 f; uint32 g; uint32 h; uint32 T1; uint32 T2; a = load_bigendian(statebytes + 0); state[0] = a; b = load_bigendian(statebytes + 4); state[1] = b; c = load_bigendian(statebytes + 8); state[2] = c; d = load_bigendian(statebytes + 12); state[3] = d; e = load_bigendian(statebytes + 16); state[4] = e; f = load_bigendian(statebytes + 20); state[5] = f; g = load_bigendian(statebytes + 24); state[6] = g; h = load_bigendian(statebytes + 28); state[7] = h; while (inlen >= 64) { uint32 w0 = load_bigendian(in + 0); uint32 w1 = load_bigendian(in + 4); uint32 w2 = load_bigendian(in + 8); uint32 w3 = load_bigendian(in + 12); uint32 w4 = load_bigendian(in + 16); uint32 w5 = load_bigendian(in + 20); uint32 w6 = load_bigendian(in + 24); uint32 w7 = load_bigendian(in + 28); uint32 w8 = load_bigendian(in + 32); uint32 w9 = load_bigendian(in + 36); uint32 w10 = load_bigendian(in + 40); uint32 w11 = load_bigendian(in + 44); uint32 w12 = load_bigendian(in + 48); uint32 w13 = load_bigendian(in + 52); uint32 w14 = load_bigendian(in + 56); uint32 w15 = load_bigendian(in + 60); F(w0 ,0x428a2f98) F(w1 ,0x71374491) F(w2 ,0xb5c0fbcf) F(w3 ,0xe9b5dba5) F(w4 ,0x3956c25b) F(w5 ,0x59f111f1) F(w6 ,0x923f82a4) F(w7 ,0xab1c5ed5) F(w8 ,0xd807aa98) F(w9 ,0x12835b01) F(w10,0x243185be) F(w11,0x550c7dc3) F(w12,0x72be5d74) F(w13,0x80deb1fe) F(w14,0x9bdc06a7) F(w15,0xc19bf174) EXPAND F(w0 ,0xe49b69c1) F(w1 ,0xefbe4786) F(w2 ,0x0fc19dc6) F(w3 ,0x240ca1cc) F(w4 ,0x2de92c6f) F(w5 ,0x4a7484aa) F(w6 ,0x5cb0a9dc) F(w7 ,0x76f988da) F(w8 ,0x983e5152) F(w9 ,0xa831c66d) F(w10,0xb00327c8) F(w11,0xbf597fc7) F(w12,0xc6e00bf3) F(w13,0xd5a79147) F(w14,0x06ca6351) F(w15,0x14292967) EXPAND F(w0 ,0x27b70a85) F(w1 ,0x2e1b2138) F(w2 ,0x4d2c6dfc) F(w3 ,0x53380d13) F(w4 ,0x650a7354) F(w5 ,0x766a0abb) F(w6 ,0x81c2c92e) F(w7 ,0x92722c85) F(w8 ,0xa2bfe8a1) F(w9 ,0xa81a664b) F(w10,0xc24b8b70) F(w11,0xc76c51a3) F(w12,0xd192e819) F(w13,0xd6990624) F(w14,0xf40e3585) F(w15,0x106aa070) EXPAND F(w0 ,0x19a4c116) F(w1 ,0x1e376c08) F(w2 ,0x2748774c) F(w3 ,0x34b0bcb5) F(w4 ,0x391c0cb3) F(w5 ,0x4ed8aa4a) F(w6 ,0x5b9cca4f) F(w7 ,0x682e6ff3) F(w8 ,0x748f82ee) F(w9 ,0x78a5636f) F(w10,0x84c87814) F(w11,0x8cc70208) F(w12,0x90befffa) F(w13,0xa4506ceb) F(w14,0xbef9a3f7) F(w15,0xc67178f2) a += state[0]; b += state[1]; c += state[2]; d += state[3]; e += state[4]; f += state[5]; g += state[6]; h += state[7]; state[0] = a; state[1] = b; state[2] = c; state[3] = d; state[4] = e; state[5] = f; state[6] = g; state[7] = h; in += 64; inlen -= 64; } store_bigendian(statebytes + 0,state[0]); store_bigendian(statebytes + 4,state[1]); store_bigendian(statebytes + 8,state[2]); store_bigendian(statebytes + 12,state[3]); store_bigendian(statebytes + 16,state[4]); store_bigendian(statebytes + 20,state[5]); store_bigendian(statebytes + 24,state[6]); store_bigendian(statebytes + 28,state[7]); return 0; } curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha256/used000066400000000000000000000000001150631715100234470ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha512/000077500000000000000000000000001150631715100225715ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha512/checksum000066400000000000000000000002011150631715100243070ustar00rootroot00000000000000f005c91634ae549f0dd4529ddbaf07038cb75a59b818cd1d4eb4e2b4019ab6733556131f320c4a145c735a22594581d454cccb15c18bf198ffcb2da29fe39456 
curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha512/inplace/000077500000000000000000000000001150631715100242045ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha512/inplace/api.h000066400000000000000000000000731150631715100251260ustar00rootroot00000000000000#define CRYPTO_STATEBYTES 64 #define CRYPTO_BLOCKBYTES 128 curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha512/inplace/blocks.c000066400000000000000000000150411150631715100256260ustar00rootroot00000000000000#include "crypto_hashblocks.h" typedef unsigned long long uint64; static uint64 load_bigendian(const unsigned char *x) { return (uint64) (x[7]) \ | (((uint64) (x[6])) << 8) \ | (((uint64) (x[5])) << 16) \ | (((uint64) (x[4])) << 24) \ | (((uint64) (x[3])) << 32) \ | (((uint64) (x[2])) << 40) \ | (((uint64) (x[1])) << 48) \ | (((uint64) (x[0])) << 56) ; } static void store_bigendian(unsigned char *x,uint64 u) { x[7] = u; u >>= 8; x[6] = u; u >>= 8; x[5] = u; u >>= 8; x[4] = u; u >>= 8; x[3] = u; u >>= 8; x[2] = u; u >>= 8; x[1] = u; u >>= 8; x[0] = u; } #define SHR(x,c) ((x) >> (c)) #define ROTR(x,c) (((x) >> (c)) | ((x) << (64 - (c)))) #define Ch(x,y,z) ((x & y) ^ (~x & z)) #define Maj(x,y,z) ((x & y) ^ (x & z) ^ (y & z)) #define Sigma0(x) (ROTR(x,28) ^ ROTR(x,34) ^ ROTR(x,39)) #define Sigma1(x) (ROTR(x,14) ^ ROTR(x,18) ^ ROTR(x,41)) #define sigma0(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHR(x,7)) #define sigma1(x) (ROTR(x,19) ^ ROTR(x,61) ^ SHR(x,6)) #define M(w0,w14,w9,w1) w0 = sigma1(w14) + w9 + sigma0(w1) + w0; #define EXPAND \ M(w0 ,w14,w9 ,w1 ) \ M(w1 ,w15,w10,w2 ) \ M(w2 ,w0 ,w11,w3 ) \ M(w3 ,w1 ,w12,w4 ) \ M(w4 ,w2 ,w13,w5 ) \ M(w5 ,w3 ,w14,w6 ) \ M(w6 ,w4 ,w15,w7 ) \ M(w7 ,w5 ,w0 ,w8 ) \ M(w8 ,w6 ,w1 ,w9 ) \ M(w9 ,w7 ,w2 ,w10) \ M(w10,w8 ,w3 ,w11) \ M(w11,w9 ,w4 ,w12) \ M(w12,w10,w5 ,w13) \ M(w13,w11,w6 ,w14) \ M(w14,w12,w7 ,w15) \ M(w15,w13,w8 ,w0 ) #define F(r0,r1,r2,r3,r4,r5,r6,r7,w,k) \ r7 += Sigma1(r4) + Ch(r4,r5,r6) + k + w; \ r3 += r7; \ r7 += Sigma0(r0) + Maj(r0,r1,r2); #define G(r0,r1,r2,r3,r4,r5,r6,r7,i) \ F(r0,r1,r2,r3,r4,r5,r6,r7,w0 ,round[i + 0]) \ F(r7,r0,r1,r2,r3,r4,r5,r6,w1 ,round[i + 1]) \ F(r6,r7,r0,r1,r2,r3,r4,r5,w2 ,round[i + 2]) \ F(r5,r6,r7,r0,r1,r2,r3,r4,w3 ,round[i + 3]) \ F(r4,r5,r6,r7,r0,r1,r2,r3,w4 ,round[i + 4]) \ F(r3,r4,r5,r6,r7,r0,r1,r2,w5 ,round[i + 5]) \ F(r2,r3,r4,r5,r6,r7,r0,r1,w6 ,round[i + 6]) \ F(r1,r2,r3,r4,r5,r6,r7,r0,w7 ,round[i + 7]) \ F(r0,r1,r2,r3,r4,r5,r6,r7,w8 ,round[i + 8]) \ F(r7,r0,r1,r2,r3,r4,r5,r6,w9 ,round[i + 9]) \ F(r6,r7,r0,r1,r2,r3,r4,r5,w10,round[i + 10]) \ F(r5,r6,r7,r0,r1,r2,r3,r4,w11,round[i + 11]) \ F(r4,r5,r6,r7,r0,r1,r2,r3,w12,round[i + 12]) \ F(r3,r4,r5,r6,r7,r0,r1,r2,w13,round[i + 13]) \ F(r2,r3,r4,r5,r6,r7,r0,r1,w14,round[i + 14]) \ F(r1,r2,r3,r4,r5,r6,r7,r0,w15,round[i + 15]) static const uint64 round[80] = { 0x428a2f98d728ae22ULL , 0x7137449123ef65cdULL , 0xb5c0fbcfec4d3b2fULL , 0xe9b5dba58189dbbcULL , 0x3956c25bf348b538ULL , 0x59f111f1b605d019ULL , 0x923f82a4af194f9bULL , 0xab1c5ed5da6d8118ULL , 0xd807aa98a3030242ULL , 0x12835b0145706fbeULL , 0x243185be4ee4b28cULL , 0x550c7dc3d5ffb4e2ULL , 0x72be5d74f27b896fULL , 0x80deb1fe3b1696b1ULL , 0x9bdc06a725c71235ULL , 0xc19bf174cf692694ULL , 0xe49b69c19ef14ad2ULL , 0xefbe4786384f25e3ULL , 0x0fc19dc68b8cd5b5ULL , 0x240ca1cc77ac9c65ULL , 0x2de92c6f592b0275ULL , 0x4a7484aa6ea6e483ULL , 0x5cb0a9dcbd41fbd4ULL , 0x76f988da831153b5ULL , 0x983e5152ee66dfabULL , 0xa831c66d2db43210ULL , 0xb00327c898fb213fULL , 0xbf597fc7beef0ee4ULL , 0xc6e00bf33da88fc2ULL , 0xd5a79147930aa725ULL , 0x06ca6351e003826fULL , 
0x142929670a0e6e70ULL , 0x27b70a8546d22ffcULL , 0x2e1b21385c26c926ULL , 0x4d2c6dfc5ac42aedULL , 0x53380d139d95b3dfULL , 0x650a73548baf63deULL , 0x766a0abb3c77b2a8ULL , 0x81c2c92e47edaee6ULL , 0x92722c851482353bULL , 0xa2bfe8a14cf10364ULL , 0xa81a664bbc423001ULL , 0xc24b8b70d0f89791ULL , 0xc76c51a30654be30ULL , 0xd192e819d6ef5218ULL , 0xd69906245565a910ULL , 0xf40e35855771202aULL , 0x106aa07032bbd1b8ULL , 0x19a4c116b8d2d0c8ULL , 0x1e376c085141ab53ULL , 0x2748774cdf8eeb99ULL , 0x34b0bcb5e19b48a8ULL , 0x391c0cb3c5c95a63ULL , 0x4ed8aa4ae3418acbULL , 0x5b9cca4f7763e373ULL , 0x682e6ff3d6b2b8a3ULL , 0x748f82ee5defb2fcULL , 0x78a5636f43172f60ULL , 0x84c87814a1f0ab72ULL , 0x8cc702081a6439ecULL , 0x90befffa23631e28ULL , 0xa4506cebde82bde9ULL , 0xbef9a3f7b2c67915ULL , 0xc67178f2e372532bULL , 0xca273eceea26619cULL , 0xd186b8c721c0c207ULL , 0xeada7dd6cde0eb1eULL , 0xf57d4f7fee6ed178ULL , 0x06f067aa72176fbaULL , 0x0a637dc5a2c898a6ULL , 0x113f9804bef90daeULL , 0x1b710b35131c471bULL , 0x28db77f523047d84ULL , 0x32caab7b40c72493ULL , 0x3c9ebe0a15c9bebcULL , 0x431d67c49c100d4cULL , 0x4cc5d4becb3e42b6ULL , 0x597f299cfc657e2aULL , 0x5fcb6fab3ad6faecULL , 0x6c44198c4a475817ULL }; int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen) { uint64 state[8]; uint64 r0; uint64 r1; uint64 r2; uint64 r3; uint64 r4; uint64 r5; uint64 r6; uint64 r7; r0 = load_bigendian(statebytes + 0); state[0] = r0; r1 = load_bigendian(statebytes + 8); state[1] = r1; r2 = load_bigendian(statebytes + 16); state[2] = r2; r3 = load_bigendian(statebytes + 24); state[3] = r3; r4 = load_bigendian(statebytes + 32); state[4] = r4; r5 = load_bigendian(statebytes + 40); state[5] = r5; r6 = load_bigendian(statebytes + 48); state[6] = r6; r7 = load_bigendian(statebytes + 56); state[7] = r7; while (inlen >= 128) { uint64 w0 = load_bigendian(in + 0); uint64 w1 = load_bigendian(in + 8); uint64 w2 = load_bigendian(in + 16); uint64 w3 = load_bigendian(in + 24); uint64 w4 = load_bigendian(in + 32); uint64 w5 = load_bigendian(in + 40); uint64 w6 = load_bigendian(in + 48); uint64 w7 = load_bigendian(in + 56); uint64 w8 = load_bigendian(in + 64); uint64 w9 = load_bigendian(in + 72); uint64 w10 = load_bigendian(in + 80); uint64 w11 = load_bigendian(in + 88); uint64 w12 = load_bigendian(in + 96); uint64 w13 = load_bigendian(in + 104); uint64 w14 = load_bigendian(in + 112); uint64 w15 = load_bigendian(in + 120); G(r0,r1,r2,r3,r4,r5,r6,r7,0) EXPAND G(r0,r1,r2,r3,r4,r5,r6,r7,16) EXPAND G(r0,r1,r2,r3,r4,r5,r6,r7,32) EXPAND G(r0,r1,r2,r3,r4,r5,r6,r7,48) EXPAND G(r0,r1,r2,r3,r4,r5,r6,r7,64) r0 += state[0]; r1 += state[1]; r2 += state[2]; r3 += state[3]; r4 += state[4]; r5 += state[5]; r6 += state[6]; r7 += state[7]; state[0] = r0; state[1] = r1; state[2] = r2; state[3] = r3; state[4] = r4; state[5] = r5; state[6] = r6; state[7] = r7; in += 128; inlen -= 128; } store_bigendian(statebytes + 0,state[0]); store_bigendian(statebytes + 8,state[1]); store_bigendian(statebytes + 16,state[2]); store_bigendian(statebytes + 24,state[3]); store_bigendian(statebytes + 32,state[4]); store_bigendian(statebytes + 40,state[5]); store_bigendian(statebytes + 48,state[6]); store_bigendian(statebytes + 56,state[7]); return 0; } curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha512/ref/000077500000000000000000000000001150631715100233455ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha512/ref/api.h000066400000000000000000000000731150631715100242670ustar00rootroot00000000000000#define CRYPTO_STATEBYTES 64 #define 
CRYPTO_BLOCKBYTES 128 curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha512/ref/blocks.c000066400000000000000000000144371150631715100247770ustar00rootroot00000000000000#include "crypto_hashblocks.h" typedef unsigned long long uint64; static uint64 load_bigendian(const unsigned char *x) { return (uint64) (x[7]) \ | (((uint64) (x[6])) << 8) \ | (((uint64) (x[5])) << 16) \ | (((uint64) (x[4])) << 24) \ | (((uint64) (x[3])) << 32) \ | (((uint64) (x[2])) << 40) \ | (((uint64) (x[1])) << 48) \ | (((uint64) (x[0])) << 56) ; } static void store_bigendian(unsigned char *x,uint64 u) { x[7] = u; u >>= 8; x[6] = u; u >>= 8; x[5] = u; u >>= 8; x[4] = u; u >>= 8; x[3] = u; u >>= 8; x[2] = u; u >>= 8; x[1] = u; u >>= 8; x[0] = u; } #define SHR(x,c) ((x) >> (c)) #define ROTR(x,c) (((x) >> (c)) | ((x) << (64 - (c)))) #define Ch(x,y,z) ((x & y) ^ (~x & z)) #define Maj(x,y,z) ((x & y) ^ (x & z) ^ (y & z)) #define Sigma0(x) (ROTR(x,28) ^ ROTR(x,34) ^ ROTR(x,39)) #define Sigma1(x) (ROTR(x,14) ^ ROTR(x,18) ^ ROTR(x,41)) #define sigma0(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHR(x,7)) #define sigma1(x) (ROTR(x,19) ^ ROTR(x,61) ^ SHR(x,6)) #define M(w0,w14,w9,w1) w0 = sigma1(w14) + w9 + sigma0(w1) + w0; #define EXPAND \ M(w0 ,w14,w9 ,w1 ) \ M(w1 ,w15,w10,w2 ) \ M(w2 ,w0 ,w11,w3 ) \ M(w3 ,w1 ,w12,w4 ) \ M(w4 ,w2 ,w13,w5 ) \ M(w5 ,w3 ,w14,w6 ) \ M(w6 ,w4 ,w15,w7 ) \ M(w7 ,w5 ,w0 ,w8 ) \ M(w8 ,w6 ,w1 ,w9 ) \ M(w9 ,w7 ,w2 ,w10) \ M(w10,w8 ,w3 ,w11) \ M(w11,w9 ,w4 ,w12) \ M(w12,w10,w5 ,w13) \ M(w13,w11,w6 ,w14) \ M(w14,w12,w7 ,w15) \ M(w15,w13,w8 ,w0 ) #define F(w,k) \ T1 = h + Sigma1(e) + Ch(e,f,g) + k + w; \ T2 = Sigma0(a) + Maj(a,b,c); \ h = g; \ g = f; \ f = e; \ e = d + T1; \ d = c; \ c = b; \ b = a; \ a = T1 + T2; int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen) { uint64 state[8]; uint64 a; uint64 b; uint64 c; uint64 d; uint64 e; uint64 f; uint64 g; uint64 h; uint64 T1; uint64 T2; a = load_bigendian(statebytes + 0); state[0] = a; b = load_bigendian(statebytes + 8); state[1] = b; c = load_bigendian(statebytes + 16); state[2] = c; d = load_bigendian(statebytes + 24); state[3] = d; e = load_bigendian(statebytes + 32); state[4] = e; f = load_bigendian(statebytes + 40); state[5] = f; g = load_bigendian(statebytes + 48); state[6] = g; h = load_bigendian(statebytes + 56); state[7] = h; while (inlen >= 128) { uint64 w0 = load_bigendian(in + 0); uint64 w1 = load_bigendian(in + 8); uint64 w2 = load_bigendian(in + 16); uint64 w3 = load_bigendian(in + 24); uint64 w4 = load_bigendian(in + 32); uint64 w5 = load_bigendian(in + 40); uint64 w6 = load_bigendian(in + 48); uint64 w7 = load_bigendian(in + 56); uint64 w8 = load_bigendian(in + 64); uint64 w9 = load_bigendian(in + 72); uint64 w10 = load_bigendian(in + 80); uint64 w11 = load_bigendian(in + 88); uint64 w12 = load_bigendian(in + 96); uint64 w13 = load_bigendian(in + 104); uint64 w14 = load_bigendian(in + 112); uint64 w15 = load_bigendian(in + 120); F(w0 ,0x428a2f98d728ae22ULL) F(w1 ,0x7137449123ef65cdULL) F(w2 ,0xb5c0fbcfec4d3b2fULL) F(w3 ,0xe9b5dba58189dbbcULL) F(w4 ,0x3956c25bf348b538ULL) F(w5 ,0x59f111f1b605d019ULL) F(w6 ,0x923f82a4af194f9bULL) F(w7 ,0xab1c5ed5da6d8118ULL) F(w8 ,0xd807aa98a3030242ULL) F(w9 ,0x12835b0145706fbeULL) F(w10,0x243185be4ee4b28cULL) F(w11,0x550c7dc3d5ffb4e2ULL) F(w12,0x72be5d74f27b896fULL) F(w13,0x80deb1fe3b1696b1ULL) F(w14,0x9bdc06a725c71235ULL) F(w15,0xc19bf174cf692694ULL) EXPAND F(w0 ,0xe49b69c19ef14ad2ULL) F(w1 ,0xefbe4786384f25e3ULL) F(w2 ,0x0fc19dc68b8cd5b5ULL) F(w3 ,0x240ca1cc77ac9c65ULL) F(w4 
,0x2de92c6f592b0275ULL) F(w5 ,0x4a7484aa6ea6e483ULL) F(w6 ,0x5cb0a9dcbd41fbd4ULL) F(w7 ,0x76f988da831153b5ULL) F(w8 ,0x983e5152ee66dfabULL) F(w9 ,0xa831c66d2db43210ULL) F(w10,0xb00327c898fb213fULL) F(w11,0xbf597fc7beef0ee4ULL) F(w12,0xc6e00bf33da88fc2ULL) F(w13,0xd5a79147930aa725ULL) F(w14,0x06ca6351e003826fULL) F(w15,0x142929670a0e6e70ULL) EXPAND F(w0 ,0x27b70a8546d22ffcULL) F(w1 ,0x2e1b21385c26c926ULL) F(w2 ,0x4d2c6dfc5ac42aedULL) F(w3 ,0x53380d139d95b3dfULL) F(w4 ,0x650a73548baf63deULL) F(w5 ,0x766a0abb3c77b2a8ULL) F(w6 ,0x81c2c92e47edaee6ULL) F(w7 ,0x92722c851482353bULL) F(w8 ,0xa2bfe8a14cf10364ULL) F(w9 ,0xa81a664bbc423001ULL) F(w10,0xc24b8b70d0f89791ULL) F(w11,0xc76c51a30654be30ULL) F(w12,0xd192e819d6ef5218ULL) F(w13,0xd69906245565a910ULL) F(w14,0xf40e35855771202aULL) F(w15,0x106aa07032bbd1b8ULL) EXPAND F(w0 ,0x19a4c116b8d2d0c8ULL) F(w1 ,0x1e376c085141ab53ULL) F(w2 ,0x2748774cdf8eeb99ULL) F(w3 ,0x34b0bcb5e19b48a8ULL) F(w4 ,0x391c0cb3c5c95a63ULL) F(w5 ,0x4ed8aa4ae3418acbULL) F(w6 ,0x5b9cca4f7763e373ULL) F(w7 ,0x682e6ff3d6b2b8a3ULL) F(w8 ,0x748f82ee5defb2fcULL) F(w9 ,0x78a5636f43172f60ULL) F(w10,0x84c87814a1f0ab72ULL) F(w11,0x8cc702081a6439ecULL) F(w12,0x90befffa23631e28ULL) F(w13,0xa4506cebde82bde9ULL) F(w14,0xbef9a3f7b2c67915ULL) F(w15,0xc67178f2e372532bULL) EXPAND F(w0 ,0xca273eceea26619cULL) F(w1 ,0xd186b8c721c0c207ULL) F(w2 ,0xeada7dd6cde0eb1eULL) F(w3 ,0xf57d4f7fee6ed178ULL) F(w4 ,0x06f067aa72176fbaULL) F(w5 ,0x0a637dc5a2c898a6ULL) F(w6 ,0x113f9804bef90daeULL) F(w7 ,0x1b710b35131c471bULL) F(w8 ,0x28db77f523047d84ULL) F(w9 ,0x32caab7b40c72493ULL) F(w10,0x3c9ebe0a15c9bebcULL) F(w11,0x431d67c49c100d4cULL) F(w12,0x4cc5d4becb3e42b6ULL) F(w13,0x597f299cfc657e2aULL) F(w14,0x5fcb6fab3ad6faecULL) F(w15,0x6c44198c4a475817ULL) a += state[0]; b += state[1]; c += state[2]; d += state[3]; e += state[4]; f += state[5]; g += state[6]; h += state[7]; state[0] = a; state[1] = b; state[2] = c; state[3] = d; state[4] = e; state[5] = f; state[6] = g; state[7] = h; in += 128; inlen -= 128; } store_bigendian(statebytes + 0,state[0]); store_bigendian(statebytes + 8,state[1]); store_bigendian(statebytes + 16,state[2]); store_bigendian(statebytes + 24,state[3]); store_bigendian(statebytes + 32,state[4]); store_bigendian(statebytes + 40,state[5]); store_bigendian(statebytes + 48,state[6]); store_bigendian(statebytes + 56,state[7]); return 0; } curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha512/selected000066400000000000000000000000001150631715100242720ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hashblocks/sha512/used000066400000000000000000000000001150631715100234420ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_hashblocks/try.c000066400000000000000000000045641150631715100225610ustar00rootroot00000000000000/* * crypto_hashblocks/try.c version 20090118 * D. J. Bernstein * Public domain. 
*/ #include #include "crypto_hashblocks.h" extern unsigned char *alignedcalloc(unsigned long long); const char *primitiveimplementation = crypto_hashblocks_IMPLEMENTATION; #define MAXTEST_BYTES (10000 + crypto_hashblocks_STATEBYTES) #define CHECKSUM_BYTES 4096 #define TUNE_BYTES 1536 static unsigned char *h; static unsigned char *h2; static unsigned char *m; static unsigned char *m2; void preallocate(void) { } void allocate(void) { h = alignedcalloc(crypto_hashblocks_STATEBYTES); h2 = alignedcalloc(crypto_hashblocks_STATEBYTES); m = alignedcalloc(MAXTEST_BYTES); m2 = alignedcalloc(MAXTEST_BYTES); } void predoit(void) { } void doit(void) { crypto_hashblocks(h,m,TUNE_BYTES); } char checksum[crypto_hashblocks_STATEBYTES * 2 + 1]; const char *checksum_compute(void) { long long i; long long j; for (i = 0;i < CHECKSUM_BYTES;++i) { long long hlen = crypto_hashblocks_STATEBYTES; long long mlen = i; for (j = -16;j < 0;++j) h[j] = random(); for (j = hlen;j < hlen + 16;++j) h[j] = random(); for (j = -16;j < hlen + 16;++j) h2[j] = h[j]; for (j = -16;j < 0;++j) m[j] = random(); for (j = mlen;j < mlen + 16;++j) m[j] = random(); for (j = -16;j < mlen + 16;++j) m2[j] = m[j]; if (crypto_hashblocks(h,m,mlen) != 0) return "crypto_hashblocks returns nonzero"; for (j = -16;j < mlen + 16;++j) if (m2[j] != m[j]) return "crypto_hashblocks writes to input"; for (j = -16;j < 0;++j) if (h2[j] != h[j]) return "crypto_hashblocks writes before output"; for (j = hlen;j < hlen + 16;++j) if (h2[j] != h[j]) return "crypto_hashblocks writes after output"; for (j = 0;j < hlen;++j) m2[j] = h2[j]; if (crypto_hashblocks(h2,m2,mlen) != 0) return "crypto_hashblocks returns nonzero"; if (crypto_hashblocks(m2,m2,mlen) != 0) return "crypto_hashblocks returns nonzero"; for (j = 0;j < hlen;++j) if (m2[j] != h2[j]) return "crypto_hashblocks does not handle overlap"; for (j = 0;j < mlen;++j) m[j] ^= h[j % hlen]; m[mlen] = h[0]; } if (crypto_hashblocks(h,m,CHECKSUM_BYTES) != 0) return "crypto_hashblocks returns nonzero"; for (i = 0;i < crypto_hashblocks_STATEBYTES;++i) { checksum[2 * i] = "0123456789abcdef"[15 & (h[i] >> 4)]; checksum[2 * i + 1] = "0123456789abcdef"[15 & h[i]]; } checksum[2 * i] = 0; return 0; } curvedns-curvedns-0.87/nacl/crypto_hashblocks/wrapper-empty.cpp000066400000000000000000000000001150631715100250740ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_onetimeauth/000077500000000000000000000000001150631715100217675ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_onetimeauth/measure.c000066400000000000000000000035321150631715100235770ustar00rootroot00000000000000#include "crypto_onetimeauth.h" #include "randombytes.h" #include "cpucycles.h" extern void printentry(long long,const char *,long long *,long long); extern unsigned char *alignedcalloc(unsigned long long); extern const char *primitiveimplementation; extern const char *implementationversion; extern const char *sizenames[]; extern const long long sizes[]; extern void allocate(void); extern void measure(void); const char *primitiveimplementation = crypto_onetimeauth_IMPLEMENTATION; const char *implementationversion = crypto_onetimeauth_VERSION; const char *sizenames[] = { "outputbytes", "keybytes", 0 }; const long long sizes[] = { crypto_onetimeauth_BYTES, crypto_onetimeauth_KEYBYTES }; #define MAXTEST_BYTES 4096 #ifdef SUPERCOP #define MGAP 8192 #else #define MGAP 8 #endif static unsigned char *k; static unsigned char *m; static unsigned char *h; void preallocate(void) { } void allocate(void) { k = 
alignedcalloc(crypto_onetimeauth_KEYBYTES); m = alignedcalloc(MAXTEST_BYTES); h = alignedcalloc(crypto_onetimeauth_BYTES); } #define TIMINGS 15 static long long cycles[TIMINGS + 1]; void measure(void) { int i; int loop; int mlen; for (loop = 0;loop < LOOPS;++loop) { for (mlen = 0;mlen <= MAXTEST_BYTES;mlen += 1 + mlen / MGAP) { randombytes(k,crypto_onetimeauth_KEYBYTES); randombytes(m,mlen); randombytes(h,crypto_onetimeauth_BYTES); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_onetimeauth(h,m,mlen,k); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_onetimeauth_verify(h,m,mlen,k); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"verify_cycles",cycles,TIMINGS); } } } curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/000077500000000000000000000000001150631715100232635ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/53/000077500000000000000000000000001150631715100235125ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/53/api.h000066400000000000000000000000631150631715100244330ustar00rootroot00000000000000#define CRYPTO_BYTES 16 #define CRYPTO_KEYBYTES 32 curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/53/auth.c000066400000000000000000000612531150631715100246260ustar00rootroot00000000000000/* 20080910 D. J. Bernstein Public domain. */ #include "crypto_onetimeauth.h" typedef unsigned char uchar; typedef int int32; typedef unsigned int uint32; typedef long long int64; typedef unsigned long long uint64; static const double poly1305_53_constants[] = { 0.00000000558793544769287109375 /* alpham80 = 3 2^(-29) */ , 24.0 /* alpham48 = 3 2^3 */ , 103079215104.0 /* alpham16 = 3 2^35 */ , 6755399441055744.0 /* alpha0 = 3 2^51 */ , 1770887431076116955136.0 /* alpha18 = 3 2^69 */ , 29014219670751100192948224.0 /* alpha32 = 3 2^83 */ , 7605903601369376408980219232256.0 /* alpha50 = 3 2^101 */ , 124615124604835863084731911901282304.0 /* alpha64 = 3 2^115 */ , 32667107224410092492483962313449748299776.0 /* alpha82 = 3 2^133 */ , 535217884764734955396857238543560676143529984.0 /* alpha96 = 3 2^147 */ , 35076039295941670036888435985190792471742381031424.0 /* alpha112 = 3 2^163 */ , 9194973245195333150150082162901855101712434733101613056.0 /* alpha130 = 3 2^181 */ , 0.0000000000000000000000000000000000000036734198463196484624023016788195177431833298649127735047148490821200539357960224151611328125 /* scale = 5 2^(-130) */ , 6755408030990331.0 /* offset0 = alpha0 + 2^33 - 5 */ , 29014256564239239022116864.0 /* offset1 = alpha32 + 2^65 - 2^33 */ , 124615283061160854719918951570079744.0 /* offset2 = alpha64 + 2^97 - 2^65 */ , 535219245894202480694386063513315216128475136.0 /* offset3 = alpha96 + 2^130 - 2^97 */ } ; int crypto_onetimeauth(unsigned char *out,const unsigned char *m,unsigned long long l,const unsigned char *k) { register const unsigned char *r = k; register const unsigned char *s = k + 16; double r0high_stack; double r1high_stack; double r1low_stack; double sr1high_stack; double r2low_stack; double sr2high_stack; double r0low_stack; double sr1low_stack; double r2high_stack; double sr2low_stack; double r3high_stack; double sr3high_stack; double r3low_stack; double sr3low_stack; int64 d0; int64 d1; int64 d2; int64 d3; register double scale; register double alpha0; register double alpha32; register double alpha64; register double alpha96; register 
double alpha130; register double h0; register double h1; register double h2; register double h3; register double h4; register double h5; register double h6; register double h7; register double y7; register double y6; register double y1; register double y0; register double y5; register double y4; register double x7; register double x6; register double x1; register double x0; register double y3; register double y2; register double r3low; register double r0low; register double r3high; register double r0high; register double sr1low; register double x5; register double r3lowx0; register double sr1high; register double x4; register double r0lowx6; register double r1low; register double x3; register double r3highx0; register double r1high; register double x2; register double r0highx6; register double sr2low; register double r0lowx0; register double sr2high; register double sr1lowx6; register double r2low; register double r0highx0; register double r2high; register double sr1highx6; register double sr3low; register double r1lowx0; register double sr3high; register double sr2lowx6; register double r1highx0; register double sr2highx6; register double r2lowx0; register double sr3lowx6; register double r2highx0; register double sr3highx6; register double r1highx4; register double r1lowx4; register double r0highx4; register double r0lowx4; register double sr3highx4; register double sr3lowx4; register double sr2highx4; register double sr2lowx4; register double r0lowx2; register double r0highx2; register double r1lowx2; register double r1highx2; register double r2lowx2; register double r2highx2; register double sr3lowx2; register double sr3highx2; register double z0; register double z1; register double z2; register double z3; register int64 r0; register int64 r1; register int64 r2; register int64 r3; register uint32 r00; register uint32 r01; register uint32 r02; register uint32 r03; register uint32 r10; register uint32 r11; register uint32 r12; register uint32 r13; register uint32 r20; register uint32 r21; register uint32 r22; register uint32 r23; register uint32 r30; register uint32 r31; register uint32 r32; register uint32 r33; register int64 m0; register int64 m1; register int64 m2; register int64 m3; register uint32 m00; register uint32 m01; register uint32 m02; register uint32 m03; register uint32 m10; register uint32 m11; register uint32 m12; register uint32 m13; register uint32 m20; register uint32 m21; register uint32 m22; register uint32 m23; register uint32 m30; register uint32 m31; register uint32 m32; register uint64 m33; register char *constants; register int32 lbelow2; register int32 lbelow3; register int32 lbelow4; register int32 lbelow5; register int32 lbelow6; register int32 lbelow7; register int32 lbelow8; register int32 lbelow9; register int32 lbelow10; register int32 lbelow11; register int32 lbelow12; register int32 lbelow13; register int32 lbelow14; register int32 lbelow15; register double alpham80; register double alpham48; register double alpham16; register double alpha18; register double alpha50; register double alpha82; register double alpha112; register double offset0; register double offset1; register double offset2; register double offset3; register uint32 s00; register uint32 s01; register uint32 s02; register uint32 s03; register uint32 s10; register uint32 s11; register uint32 s12; register uint32 s13; register uint32 s20; register uint32 s21; register uint32 s22; register uint32 s23; register uint32 s30; register uint32 s31; register uint32 s32; register uint32 s33; 
register uint64 bits32; register uint64 f; register uint64 f0; register uint64 f1; register uint64 f2; register uint64 f3; register uint64 f4; register uint64 g; register uint64 g0; register uint64 g1; register uint64 g2; register uint64 g3; register uint64 g4; r00 = *(uchar *) (r + 0); constants = (char *) &poly1305_53_constants; r01 = *(uchar *) (r + 1); r02 = *(uchar *) (r + 2); r0 = 2151; r03 = *(uchar *) (r + 3); r03 &= 15; r0 <<= 51; r10 = *(uchar *) (r + 4); r10 &= 252; r01 <<= 8; r0 += r00; r11 = *(uchar *) (r + 5); r02 <<= 16; r0 += r01; r12 = *(uchar *) (r + 6); r03 <<= 24; r0 += r02; r13 = *(uchar *) (r + 7); r13 &= 15; r1 = 2215; r0 += r03; d0 = r0; r1 <<= 51; r2 = 2279; r20 = *(uchar *) (r + 8); r20 &= 252; r11 <<= 8; r1 += r10; r21 = *(uchar *) (r + 9); r12 <<= 16; r1 += r11; r22 = *(uchar *) (r + 10); r13 <<= 24; r1 += r12; r23 = *(uchar *) (r + 11); r23 &= 15; r2 <<= 51; r1 += r13; d1 = r1; r21 <<= 8; r2 += r20; r30 = *(uchar *) (r + 12); r30 &= 252; r22 <<= 16; r2 += r21; r31 = *(uchar *) (r + 13); r23 <<= 24; r2 += r22; r32 = *(uchar *) (r + 14); r2 += r23; r3 = 2343; d2 = r2; r3 <<= 51; alpha32 = *(double *) (constants + 40); r33 = *(uchar *) (r + 15); r33 &= 15; r31 <<= 8; r3 += r30; r32 <<= 16; r3 += r31; r33 <<= 24; r3 += r32; r3 += r33; h0 = alpha32 - alpha32; d3 = r3; h1 = alpha32 - alpha32; alpha0 = *(double *) (constants + 24); h2 = alpha32 - alpha32; alpha64 = *(double *) (constants + 56); h3 = alpha32 - alpha32; alpha18 = *(double *) (constants + 32); h4 = alpha32 - alpha32; r0low = *(double *) &d0; h5 = alpha32 - alpha32; r1low = *(double *) &d1; h6 = alpha32 - alpha32; r2low = *(double *) &d2; h7 = alpha32 - alpha32; alpha50 = *(double *) (constants + 48); r0low -= alpha0; alpha82 = *(double *) (constants + 64); r1low -= alpha32; scale = *(double *) (constants + 96); r2low -= alpha64; alpha96 = *(double *) (constants + 72); r0high = r0low + alpha18; r3low = *(double *) &d3; alpham80 = *(double *) (constants + 0); r1high = r1low + alpha50; sr1low = scale * r1low; alpham48 = *(double *) (constants + 8); r2high = r2low + alpha82; sr2low = scale * r2low; r0high -= alpha18; r0high_stack = r0high; r3low -= alpha96; r1high -= alpha50; r1high_stack = r1high; sr1high = sr1low + alpham80; alpha112 = *(double *) (constants + 80); r0low -= r0high; alpham16 = *(double *) (constants + 16); r2high -= alpha82; sr3low = scale * r3low; alpha130 = *(double *) (constants + 88); sr2high = sr2low + alpham48; r1low -= r1high; r1low_stack = r1low; sr1high -= alpham80; sr1high_stack = sr1high; r2low -= r2high; r2low_stack = r2low; sr2high -= alpham48; sr2high_stack = sr2high; r3high = r3low + alpha112; r0low_stack = r0low; sr1low -= sr1high; sr1low_stack = sr1low; sr3high = sr3low + alpham16; r2high_stack = r2high; sr2low -= sr2high; sr2low_stack = sr2low; r3high -= alpha112; r3high_stack = r3high; sr3high -= alpham16; sr3high_stack = sr3high; r3low -= r3high; r3low_stack = r3low; sr3low -= sr3high; sr3low_stack = sr3low; if (l < 16) goto addatmost15bytes; m00 = *(uchar *) (m + 0); m0 = 2151; m0 <<= 51; m1 = 2215; m01 = *(uchar *) (m + 1); m1 <<= 51; m2 = 2279; m02 = *(uchar *) (m + 2); m2 <<= 51; m3 = 2343; m03 = *(uchar *) (m + 3); m10 = *(uchar *) (m + 4); m01 <<= 8; m0 += m00; m11 = *(uchar *) (m + 5); m02 <<= 16; m0 += m01; m12 = *(uchar *) (m + 6); m03 <<= 24; m0 += m02; m13 = *(uchar *) (m + 7); m3 <<= 51; m0 += m03; m20 = *(uchar *) (m + 8); m11 <<= 8; m1 += m10; m21 = *(uchar *) (m + 9); m12 <<= 16; m1 += m11; m22 = *(uchar *) (m + 10); m13 <<= 24; m1 += m12; m23 = *(uchar *) 
(m + 11); m1 += m13; m30 = *(uchar *) (m + 12); m21 <<= 8; m2 += m20; m31 = *(uchar *) (m + 13); m22 <<= 16; m2 += m21; m32 = *(uchar *) (m + 14); m23 <<= 24; m2 += m22; m33 = *(uchar *) (m + 15); m2 += m23; d0 = m0; m31 <<= 8; m3 += m30; d1 = m1; m32 <<= 16; m3 += m31; d2 = m2; m33 += 256; m33 <<= 24; m3 += m32; m3 += m33; d3 = m3; m += 16; l -= 16; z0 = *(double *) &d0; z1 = *(double *) &d1; z2 = *(double *) &d2; z3 = *(double *) &d3; z0 -= alpha0; z1 -= alpha32; z2 -= alpha64; z3 -= alpha96; h0 += z0; h1 += z1; h3 += z2; h5 += z3; if (l < 16) goto multiplyaddatmost15bytes; multiplyaddatleast16bytes:; m2 = 2279; m20 = *(uchar *) (m + 8); y7 = h7 + alpha130; m2 <<= 51; m3 = 2343; m21 = *(uchar *) (m + 9); y6 = h6 + alpha130; m3 <<= 51; m0 = 2151; m22 = *(uchar *) (m + 10); y1 = h1 + alpha32; m0 <<= 51; m1 = 2215; m23 = *(uchar *) (m + 11); y0 = h0 + alpha32; m1 <<= 51; m30 = *(uchar *) (m + 12); y7 -= alpha130; m21 <<= 8; m2 += m20; m31 = *(uchar *) (m + 13); y6 -= alpha130; m22 <<= 16; m2 += m21; m32 = *(uchar *) (m + 14); y1 -= alpha32; m23 <<= 24; m2 += m22; m33 = *(uchar *) (m + 15); y0 -= alpha32; m2 += m23; m00 = *(uchar *) (m + 0); y5 = h5 + alpha96; m31 <<= 8; m3 += m30; m01 = *(uchar *) (m + 1); y4 = h4 + alpha96; m32 <<= 16; m02 = *(uchar *) (m + 2); x7 = h7 - y7; y7 *= scale; m33 += 256; m03 = *(uchar *) (m + 3); x6 = h6 - y6; y6 *= scale; m33 <<= 24; m3 += m31; m10 = *(uchar *) (m + 4); x1 = h1 - y1; m01 <<= 8; m3 += m32; m11 = *(uchar *) (m + 5); x0 = h0 - y0; m3 += m33; m0 += m00; m12 = *(uchar *) (m + 6); y5 -= alpha96; m02 <<= 16; m0 += m01; m13 = *(uchar *) (m + 7); y4 -= alpha96; m03 <<= 24; m0 += m02; d2 = m2; x1 += y7; m0 += m03; d3 = m3; x0 += y6; m11 <<= 8; m1 += m10; d0 = m0; x7 += y5; m12 <<= 16; m1 += m11; x6 += y4; m13 <<= 24; m1 += m12; y3 = h3 + alpha64; m1 += m13; d1 = m1; y2 = h2 + alpha64; x0 += x1; x6 += x7; y3 -= alpha64; r3low = r3low_stack; y2 -= alpha64; r0low = r0low_stack; x5 = h5 - y5; r3lowx0 = r3low * x0; r3high = r3high_stack; x4 = h4 - y4; r0lowx6 = r0low * x6; r0high = r0high_stack; x3 = h3 - y3; r3highx0 = r3high * x0; sr1low = sr1low_stack; x2 = h2 - y2; r0highx6 = r0high * x6; sr1high = sr1high_stack; x5 += y3; r0lowx0 = r0low * x0; r1low = r1low_stack; h6 = r3lowx0 + r0lowx6; sr1lowx6 = sr1low * x6; r1high = r1high_stack; x4 += y2; r0highx0 = r0high * x0; sr2low = sr2low_stack; h7 = r3highx0 + r0highx6; sr1highx6 = sr1high * x6; sr2high = sr2high_stack; x3 += y1; r1lowx0 = r1low * x0; r2low = r2low_stack; h0 = r0lowx0 + sr1lowx6; sr2lowx6 = sr2low * x6; r2high = r2high_stack; x2 += y0; r1highx0 = r1high * x0; sr3low = sr3low_stack; h1 = r0highx0 + sr1highx6; sr2highx6 = sr2high * x6; sr3high = sr3high_stack; x4 += x5; r2lowx0 = r2low * x0; z2 = *(double *) &d2; h2 = r1lowx0 + sr2lowx6; sr3lowx6 = sr3low * x6; x2 += x3; r2highx0 = r2high * x0; z3 = *(double *) &d3; h3 = r1highx0 + sr2highx6; sr3highx6 = sr3high * x6; r1highx4 = r1high * x4; z2 -= alpha64; h4 = r2lowx0 + sr3lowx6; r1lowx4 = r1low * x4; r0highx4 = r0high * x4; z3 -= alpha96; h5 = r2highx0 + sr3highx6; r0lowx4 = r0low * x4; h7 += r1highx4; sr3highx4 = sr3high * x4; h6 += r1lowx4; sr3lowx4 = sr3low * x4; h5 += r0highx4; sr2highx4 = sr2high * x4; h4 += r0lowx4; sr2lowx4 = sr2low * x4; h3 += sr3highx4; r0lowx2 = r0low * x2; h2 += sr3lowx4; r0highx2 = r0high * x2; h1 += sr2highx4; r1lowx2 = r1low * x2; h0 += sr2lowx4; r1highx2 = r1high * x2; h2 += r0lowx2; r2lowx2 = r2low * x2; h3 += r0highx2; r2highx2 = r2high * x2; h4 += r1lowx2; sr3lowx2 = sr3low * x2; h5 += r1highx2; sr3highx2 = 
sr3high * x2; alpha0 = *(double *) (constants + 24); m += 16; h6 += r2lowx2; l -= 16; h7 += r2highx2; z1 = *(double *) &d1; h0 += sr3lowx2; z0 = *(double *) &d0; h1 += sr3highx2; z1 -= alpha32; z0 -= alpha0; h5 += z3; h3 += z2; h1 += z1; h0 += z0; if (l >= 16) goto multiplyaddatleast16bytes; multiplyaddatmost15bytes:; y7 = h7 + alpha130; y6 = h6 + alpha130; y1 = h1 + alpha32; y0 = h0 + alpha32; y7 -= alpha130; y6 -= alpha130; y1 -= alpha32; y0 -= alpha32; y5 = h5 + alpha96; y4 = h4 + alpha96; x7 = h7 - y7; y7 *= scale; x6 = h6 - y6; y6 *= scale; x1 = h1 - y1; x0 = h0 - y0; y5 -= alpha96; y4 -= alpha96; x1 += y7; x0 += y6; x7 += y5; x6 += y4; y3 = h3 + alpha64; y2 = h2 + alpha64; x0 += x1; x6 += x7; y3 -= alpha64; r3low = r3low_stack; y2 -= alpha64; r0low = r0low_stack; x5 = h5 - y5; r3lowx0 = r3low * x0; r3high = r3high_stack; x4 = h4 - y4; r0lowx6 = r0low * x6; r0high = r0high_stack; x3 = h3 - y3; r3highx0 = r3high * x0; sr1low = sr1low_stack; x2 = h2 - y2; r0highx6 = r0high * x6; sr1high = sr1high_stack; x5 += y3; r0lowx0 = r0low * x0; r1low = r1low_stack; h6 = r3lowx0 + r0lowx6; sr1lowx6 = sr1low * x6; r1high = r1high_stack; x4 += y2; r0highx0 = r0high * x0; sr2low = sr2low_stack; h7 = r3highx0 + r0highx6; sr1highx6 = sr1high * x6; sr2high = sr2high_stack; x3 += y1; r1lowx0 = r1low * x0; r2low = r2low_stack; h0 = r0lowx0 + sr1lowx6; sr2lowx6 = sr2low * x6; r2high = r2high_stack; x2 += y0; r1highx0 = r1high * x0; sr3low = sr3low_stack; h1 = r0highx0 + sr1highx6; sr2highx6 = sr2high * x6; sr3high = sr3high_stack; x4 += x5; r2lowx0 = r2low * x0; h2 = r1lowx0 + sr2lowx6; sr3lowx6 = sr3low * x6; x2 += x3; r2highx0 = r2high * x0; h3 = r1highx0 + sr2highx6; sr3highx6 = sr3high * x6; r1highx4 = r1high * x4; h4 = r2lowx0 + sr3lowx6; r1lowx4 = r1low * x4; r0highx4 = r0high * x4; h5 = r2highx0 + sr3highx6; r0lowx4 = r0low * x4; h7 += r1highx4; sr3highx4 = sr3high * x4; h6 += r1lowx4; sr3lowx4 = sr3low * x4; h5 += r0highx4; sr2highx4 = sr2high * x4; h4 += r0lowx4; sr2lowx4 = sr2low * x4; h3 += sr3highx4; r0lowx2 = r0low * x2; h2 += sr3lowx4; r0highx2 = r0high * x2; h1 += sr2highx4; r1lowx2 = r1low * x2; h0 += sr2lowx4; r1highx2 = r1high * x2; h2 += r0lowx2; r2lowx2 = r2low * x2; h3 += r0highx2; r2highx2 = r2high * x2; h4 += r1lowx2; sr3lowx2 = sr3low * x2; h5 += r1highx2; sr3highx2 = sr3high * x2; h6 += r2lowx2; h7 += r2highx2; h0 += sr3lowx2; h1 += sr3highx2; addatmost15bytes:; if (l == 0) goto nomorebytes; lbelow2 = l - 2; lbelow3 = l - 3; lbelow2 >>= 31; lbelow4 = l - 4; m00 = *(uchar *) (m + 0); lbelow3 >>= 31; m += lbelow2; m01 = *(uchar *) (m + 1); lbelow4 >>= 31; m += lbelow3; m02 = *(uchar *) (m + 2); m += lbelow4; m0 = 2151; m03 = *(uchar *) (m + 3); m0 <<= 51; m1 = 2215; m0 += m00; m01 &= ~lbelow2; m02 &= ~lbelow3; m01 -= lbelow2; m01 <<= 8; m03 &= ~lbelow4; m0 += m01; lbelow2 -= lbelow3; m02 += lbelow2; lbelow3 -= lbelow4; m02 <<= 16; m03 += lbelow3; m03 <<= 24; m0 += m02; m0 += m03; lbelow5 = l - 5; lbelow6 = l - 6; lbelow7 = l - 7; lbelow5 >>= 31; lbelow8 = l - 8; lbelow6 >>= 31; m += lbelow5; m10 = *(uchar *) (m + 4); lbelow7 >>= 31; m += lbelow6; m11 = *(uchar *) (m + 5); lbelow8 >>= 31; m += lbelow7; m12 = *(uchar *) (m + 6); m1 <<= 51; m += lbelow8; m13 = *(uchar *) (m + 7); m10 &= ~lbelow5; lbelow4 -= lbelow5; m10 += lbelow4; lbelow5 -= lbelow6; m11 &= ~lbelow6; m11 += lbelow5; m11 <<= 8; m1 += m10; m1 += m11; m12 &= ~lbelow7; lbelow6 -= lbelow7; m13 &= ~lbelow8; m12 += lbelow6; lbelow7 -= lbelow8; m12 <<= 16; m13 += lbelow7; m13 <<= 24; m1 += m12; m1 += m13; m2 = 2279; lbelow9 = 
l - 9; m3 = 2343; lbelow10 = l - 10; lbelow11 = l - 11; lbelow9 >>= 31; lbelow12 = l - 12; lbelow10 >>= 31; m += lbelow9; m20 = *(uchar *) (m + 8); lbelow11 >>= 31; m += lbelow10; m21 = *(uchar *) (m + 9); lbelow12 >>= 31; m += lbelow11; m22 = *(uchar *) (m + 10); m2 <<= 51; m += lbelow12; m23 = *(uchar *) (m + 11); m20 &= ~lbelow9; lbelow8 -= lbelow9; m20 += lbelow8; lbelow9 -= lbelow10; m21 &= ~lbelow10; m21 += lbelow9; m21 <<= 8; m2 += m20; m2 += m21; m22 &= ~lbelow11; lbelow10 -= lbelow11; m23 &= ~lbelow12; m22 += lbelow10; lbelow11 -= lbelow12; m22 <<= 16; m23 += lbelow11; m23 <<= 24; m2 += m22; m3 <<= 51; lbelow13 = l - 13; lbelow13 >>= 31; lbelow14 = l - 14; lbelow14 >>= 31; m += lbelow13; lbelow15 = l - 15; m30 = *(uchar *) (m + 12); lbelow15 >>= 31; m += lbelow14; m31 = *(uchar *) (m + 13); m += lbelow15; m2 += m23; m32 = *(uchar *) (m + 14); m30 &= ~lbelow13; lbelow12 -= lbelow13; m30 += lbelow12; lbelow13 -= lbelow14; m3 += m30; m31 &= ~lbelow14; m31 += lbelow13; m32 &= ~lbelow15; m31 <<= 8; lbelow14 -= lbelow15; m3 += m31; m32 += lbelow14; d0 = m0; m32 <<= 16; m33 = lbelow15 + 1; d1 = m1; m33 <<= 24; m3 += m32; d2 = m2; m3 += m33; d3 = m3; alpha0 = *(double *) (constants + 24); z3 = *(double *) &d3; z2 = *(double *) &d2; z1 = *(double *) &d1; z0 = *(double *) &d0; z3 -= alpha96; z2 -= alpha64; z1 -= alpha32; z0 -= alpha0; h5 += z3; h3 += z2; h1 += z1; h0 += z0; y7 = h7 + alpha130; y6 = h6 + alpha130; y1 = h1 + alpha32; y0 = h0 + alpha32; y7 -= alpha130; y6 -= alpha130; y1 -= alpha32; y0 -= alpha32; y5 = h5 + alpha96; y4 = h4 + alpha96; x7 = h7 - y7; y7 *= scale; x6 = h6 - y6; y6 *= scale; x1 = h1 - y1; x0 = h0 - y0; y5 -= alpha96; y4 -= alpha96; x1 += y7; x0 += y6; x7 += y5; x6 += y4; y3 = h3 + alpha64; y2 = h2 + alpha64; x0 += x1; x6 += x7; y3 -= alpha64; r3low = r3low_stack; y2 -= alpha64; r0low = r0low_stack; x5 = h5 - y5; r3lowx0 = r3low * x0; r3high = r3high_stack; x4 = h4 - y4; r0lowx6 = r0low * x6; r0high = r0high_stack; x3 = h3 - y3; r3highx0 = r3high * x0; sr1low = sr1low_stack; x2 = h2 - y2; r0highx6 = r0high * x6; sr1high = sr1high_stack; x5 += y3; r0lowx0 = r0low * x0; r1low = r1low_stack; h6 = r3lowx0 + r0lowx6; sr1lowx6 = sr1low * x6; r1high = r1high_stack; x4 += y2; r0highx0 = r0high * x0; sr2low = sr2low_stack; h7 = r3highx0 + r0highx6; sr1highx6 = sr1high * x6; sr2high = sr2high_stack; x3 += y1; r1lowx0 = r1low * x0; r2low = r2low_stack; h0 = r0lowx0 + sr1lowx6; sr2lowx6 = sr2low * x6; r2high = r2high_stack; x2 += y0; r1highx0 = r1high * x0; sr3low = sr3low_stack; h1 = r0highx0 + sr1highx6; sr2highx6 = sr2high * x6; sr3high = sr3high_stack; x4 += x5; r2lowx0 = r2low * x0; h2 = r1lowx0 + sr2lowx6; sr3lowx6 = sr3low * x6; x2 += x3; r2highx0 = r2high * x0; h3 = r1highx0 + sr2highx6; sr3highx6 = sr3high * x6; r1highx4 = r1high * x4; h4 = r2lowx0 + sr3lowx6; r1lowx4 = r1low * x4; r0highx4 = r0high * x4; h5 = r2highx0 + sr3highx6; r0lowx4 = r0low * x4; h7 += r1highx4; sr3highx4 = sr3high * x4; h6 += r1lowx4; sr3lowx4 = sr3low * x4; h5 += r0highx4; sr2highx4 = sr2high * x4; h4 += r0lowx4; sr2lowx4 = sr2low * x4; h3 += sr3highx4; r0lowx2 = r0low * x2; h2 += sr3lowx4; r0highx2 = r0high * x2; h1 += sr2highx4; r1lowx2 = r1low * x2; h0 += sr2lowx4; r1highx2 = r1high * x2; h2 += r0lowx2; r2lowx2 = r2low * x2; h3 += r0highx2; r2highx2 = r2high * x2; h4 += r1lowx2; sr3lowx2 = sr3low * x2; h5 += r1highx2; sr3highx2 = sr3high * x2; h6 += r2lowx2; h7 += r2highx2; h0 += sr3lowx2; h1 += sr3highx2; nomorebytes:; offset0 = *(double *) (constants + 104); y7 = h7 + alpha130; offset1 = 
*(double *) (constants + 112); y0 = h0 + alpha32; offset2 = *(double *) (constants + 120); y1 = h1 + alpha32; offset3 = *(double *) (constants + 128); y2 = h2 + alpha64; y7 -= alpha130; y3 = h3 + alpha64; y4 = h4 + alpha96; y5 = h5 + alpha96; x7 = h7 - y7; y7 *= scale; y0 -= alpha32; y1 -= alpha32; y2 -= alpha64; h6 += x7; y3 -= alpha64; y4 -= alpha96; y5 -= alpha96; y6 = h6 + alpha130; x0 = h0 - y0; x1 = h1 - y1; x2 = h2 - y2; y6 -= alpha130; x0 += y7; x3 = h3 - y3; x4 = h4 - y4; x5 = h5 - y5; x6 = h6 - y6; y6 *= scale; x2 += y0; x3 += y1; x4 += y2; x0 += y6; x5 += y3; x6 += y4; x2 += x3; x0 += x1; x4 += x5; x6 += y5; x2 += offset1; *(double *) &d1 = x2; x0 += offset0; *(double *) &d0 = x0; x4 += offset2; *(double *) &d2 = x4; x6 += offset3; *(double *) &d3 = x6; f0 = d0; f1 = d1; bits32 = -1; f2 = d2; bits32 >>= 32; f3 = d3; f = f0 >> 32; f0 &= bits32; f &= 255; f1 += f; g0 = f0 + 5; g = g0 >> 32; g0 &= bits32; f = f1 >> 32; f1 &= bits32; f &= 255; g1 = f1 + g; g = g1 >> 32; f2 += f; f = f2 >> 32; g1 &= bits32; f2 &= bits32; f &= 255; f3 += f; g2 = f2 + g; g = g2 >> 32; g2 &= bits32; f4 = f3 >> 32; f3 &= bits32; f4 &= 255; g3 = f3 + g; g = g3 >> 32; g3 &= bits32; g4 = f4 + g; g4 = g4 - 4; s00 = *(uchar *) (s + 0); f = (int64) g4 >> 63; s01 = *(uchar *) (s + 1); f0 &= f; g0 &= ~f; s02 = *(uchar *) (s + 2); f1 &= f; f0 |= g0; s03 = *(uchar *) (s + 3); g1 &= ~f; f2 &= f; s10 = *(uchar *) (s + 4); f3 &= f; g2 &= ~f; s11 = *(uchar *) (s + 5); g3 &= ~f; f1 |= g1; s12 = *(uchar *) (s + 6); f2 |= g2; f3 |= g3; s13 = *(uchar *) (s + 7); s01 <<= 8; f0 += s00; s20 = *(uchar *) (s + 8); s02 <<= 16; f0 += s01; s21 = *(uchar *) (s + 9); s03 <<= 24; f0 += s02; s22 = *(uchar *) (s + 10); s11 <<= 8; f1 += s10; s23 = *(uchar *) (s + 11); s12 <<= 16; f1 += s11; s30 = *(uchar *) (s + 12); s13 <<= 24; f1 += s12; s31 = *(uchar *) (s + 13); f0 += s03; f1 += s13; s32 = *(uchar *) (s + 14); s21 <<= 8; f2 += s20; s33 = *(uchar *) (s + 15); s22 <<= 16; f2 += s21; s23 <<= 24; f2 += s22; s31 <<= 8; f3 += s30; s32 <<= 16; f3 += s31; s33 <<= 24; f3 += s32; f2 += s23; f3 += s33; *(uchar *) (out + 0) = f0; f0 >>= 8; *(uchar *) (out + 1) = f0; f0 >>= 8; *(uchar *) (out + 2) = f0; f0 >>= 8; *(uchar *) (out + 3) = f0; f0 >>= 8; f1 += f0; *(uchar *) (out + 4) = f1; f1 >>= 8; *(uchar *) (out + 5) = f1; f1 >>= 8; *(uchar *) (out + 6) = f1; f1 >>= 8; *(uchar *) (out + 7) = f1; f1 >>= 8; f2 += f1; *(uchar *) (out + 8) = f2; f2 >>= 8; *(uchar *) (out + 9) = f2; f2 >>= 8; *(uchar *) (out + 10) = f2; f2 >>= 8; *(uchar *) (out + 11) = f2; f2 >>= 8; f3 += f2; *(uchar *) (out + 12) = f3; f3 >>= 8; *(uchar *) (out + 13) = f3; f3 >>= 8; *(uchar *) (out + 14) = f3; f3 >>= 8; *(uchar *) (out + 15) = f3; return 0; } curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/53/verify.c000066400000000000000000000004561150631715100251670ustar00rootroot00000000000000#include "crypto_verify_16.h" #include "crypto_onetimeauth.h" int crypto_onetimeauth_verify(const unsigned char *h,const unsigned char *in,unsigned long long inlen,const unsigned char *k) { unsigned char correct[16]; crypto_onetimeauth(correct,in,inlen,k); return crypto_verify_16(h,correct); } curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/amd64/000077500000000000000000000000001150631715100241765ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/amd64/api.h000066400000000000000000000000631150631715100251170ustar00rootroot00000000000000#define CRYPTO_BYTES 16 #define CRYPTO_KEYBYTES 32 
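
The api.h just listed only fixes the sizes (a 16-byte tag and a 32-byte key). As a rough caller-side sketch, not part of the NaCl tree itself, the program below shows how the crypto_onetimeauth / crypto_onetimeauth_verify pair built from these sources is typically driven. It assumes the installed crypto_onetimeauth.h exposes the crypto_onetimeauth_BYTES and crypto_onetimeauth_KEYBYTES macros (the try.c harness later in this archive relies on the same names), and the key value is a placeholder.

/*
 * Hypothetical caller, not shipped with NaCl or CurveDNS: computes a
 * one-time authenticator over a short message and then checks it.
 * Assumes crypto_onetimeauth.h from this build is on the include path.
 */
#include <stdio.h>
#include <string.h>
#include "crypto_onetimeauth.h"

int main(void)
{
  unsigned char k[crypto_onetimeauth_KEYBYTES];  /* 32-byte one-time key */
  unsigned char a[crypto_onetimeauth_BYTES];     /* 16-byte authenticator */
  const unsigned char m[] = "attack at dawn";

  memset(k, 0x42, sizeof k);                     /* placeholder key for the sketch */

  crypto_onetimeauth(a, m, sizeof m - 1, k);     /* compute the tag */

  /* verify returns 0 for a valid tag, nonzero otherwise */
  if (crypto_onetimeauth_verify(a, m, sizeof m - 1, k) == 0)
    printf("tag verifies\n");
  else
    printf("tag rejected\n");
  return 0;
}

Note that a one-time authenticator key must not be reused for a second message; higher-level callers derive a fresh key per packet.
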
curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/amd64/auth.s000066400000000000000000002416311150631715100253320ustar00rootroot00000000000000 # qhasm: int64 r11_caller # qhasm: int64 r12_caller # qhasm: int64 r13_caller # qhasm: int64 r14_caller # qhasm: int64 r15_caller # qhasm: int64 rbx_caller # qhasm: int64 rbp_caller # qhasm: caller r11_caller # qhasm: caller r12_caller # qhasm: caller r13_caller # qhasm: caller r14_caller # qhasm: caller r15_caller # qhasm: caller rbx_caller # qhasm: caller rbp_caller # qhasm: stack64 r11_stack # qhasm: stack64 r12_stack # qhasm: stack64 r13_stack # qhasm: stack64 r14_stack # qhasm: stack64 r15_stack # qhasm: stack64 rbx_stack # qhasm: stack64 rbp_stack # qhasm: int64 out # qhasm: stack64 out_stack # qhasm: int64 m # qhasm: int64 l # qhasm: int64 k # qhasm: stack64 k_stack # qhasm: int64 m0 # qhasm: int64 m1 # qhasm: int64 m2 # qhasm: int64 m3 # qhasm: float80 a0 # qhasm: float80 a1 # qhasm: float80 a2 # qhasm: float80 a3 # qhasm: float80 h0 # qhasm: float80 h1 # qhasm: float80 h2 # qhasm: float80 h3 # qhasm: float80 x0 # qhasm: float80 x1 # qhasm: float80 x2 # qhasm: float80 x3 # qhasm: float80 y0 # qhasm: float80 y1 # qhasm: float80 y2 # qhasm: float80 y3 # qhasm: float80 r0x0 # qhasm: float80 r1x0 # qhasm: float80 r2x0 # qhasm: float80 r3x0 # qhasm: float80 r0x1 # qhasm: float80 r1x1 # qhasm: float80 r2x1 # qhasm: float80 sr3x1 # qhasm: float80 r0x2 # qhasm: float80 r1x2 # qhasm: float80 sr2x2 # qhasm: float80 sr3x2 # qhasm: float80 r0x3 # qhasm: float80 sr1x3 # qhasm: float80 sr2x3 # qhasm: float80 sr3x3 # qhasm: stack64 d0 # qhasm: stack64 d1 # qhasm: stack64 d2 # qhasm: stack64 d3 # qhasm: stack64 r0 # qhasm: stack64 r1 # qhasm: stack64 r2 # qhasm: stack64 r3 # qhasm: stack64 sr1 # qhasm: stack64 sr2 # qhasm: stack64 sr3 # qhasm: enter crypto_onetimeauth_poly1305_amd64 .text .p2align 5 .globl _crypto_onetimeauth_poly1305_amd64 .globl crypto_onetimeauth_poly1305_amd64 _crypto_onetimeauth_poly1305_amd64: crypto_onetimeauth_poly1305_amd64: mov %rsp,%r11 and $31,%r11 add $192,%r11 sub %r11,%rsp # qhasm: input out # qhasm: input m # qhasm: input l # qhasm: input k # qhasm: r11_stack = r11_caller # asm 1: movq r11_stack=stack64#1 # asm 2: movq r11_stack=32(%rsp) movq %r11,32(%rsp) # qhasm: r12_stack = r12_caller # asm 1: movq r12_stack=stack64#2 # asm 2: movq r12_stack=40(%rsp) movq %r12,40(%rsp) # qhasm: r13_stack = r13_caller # asm 1: movq r13_stack=stack64#3 # asm 2: movq r13_stack=48(%rsp) movq %r13,48(%rsp) # qhasm: r14_stack = r14_caller # asm 1: movq r14_stack=stack64#4 # asm 2: movq r14_stack=56(%rsp) movq %r14,56(%rsp) # qhasm: r15_stack = r15_caller # asm 1: movq r15_stack=stack64#5 # asm 2: movq r15_stack=64(%rsp) movq %r15,64(%rsp) # qhasm: rbx_stack = rbx_caller # asm 1: movq rbx_stack=stack64#6 # asm 2: movq rbx_stack=72(%rsp) movq %rbx,72(%rsp) # qhasm: rbp_stack = rbp_caller # asm 1: movq rbp_stack=stack64#7 # asm 2: movq rbp_stack=80(%rsp) movq %rbp,80(%rsp) # qhasm: round *(uint16 *) &crypto_onetimeauth_poly1305_amd64_rounding fldcw crypto_onetimeauth_poly1305_amd64_rounding(%rip) # qhasm: m0 = *(uint32 *) (k + 0) # asm 1: movl 0(m0=int64#5d # asm 2: movl 0(m0=%r8d movl 0(%rcx),%r8d # qhasm: m1 = *(uint32 *) (k + 4) # asm 1: movl 4(m1=int64#6d # asm 2: movl 4(m1=%r9d movl 4(%rcx),%r9d # qhasm: m2 = *(uint32 *) (k + 8) # asm 1: movl 8(m2=int64#7d # asm 2: movl 8(m2=%eax movl 8(%rcx),%eax # qhasm: m3 = *(uint32 *) (k + 12) # asm 1: movl 12(m3=int64#8d # asm 2: movl 12(m3=%r10d movl 12(%rcx),%r10d # qhasm: out_stack = out # asm 
1: movq out_stack=stack64#8 # asm 2: movq out_stack=88(%rsp) movq %rdi,88(%rsp) # qhasm: k_stack = k # asm 1: movq k_stack=stack64#9 # asm 2: movq k_stack=96(%rsp) movq %rcx,96(%rsp) # qhasm: d0 top = 0x43300000 # asm 1: movl $0x43300000,>d0=stack64#10 # asm 2: movl $0x43300000,>d0=108(%rsp) movl $0x43300000,108(%rsp) # qhasm: d1 top = 0x45300000 # asm 1: movl $0x45300000,>d1=stack64#11 # asm 2: movl $0x45300000,>d1=116(%rsp) movl $0x45300000,116(%rsp) # qhasm: d2 top = 0x47300000 # asm 1: movl $0x47300000,>d2=stack64#12 # asm 2: movl $0x47300000,>d2=124(%rsp) movl $0x47300000,124(%rsp) # qhasm: d3 top = 0x49300000 # asm 1: movl $0x49300000,>d3=stack64#13 # asm 2: movl $0x49300000,>d3=132(%rsp) movl $0x49300000,132(%rsp) # qhasm: (uint32) m0 &= 0x0fffffff # asm 1: and $0x0fffffff,r0=stack64#14 # asm 2: fstpl >r0=136(%rsp) fstpl 136(%rsp) # comment:fpstackfrombottom:r1=stack64#15 # asm 2: fstl >r1=144(%rsp) fstl 144(%rsp) # comment:fpstackfrombottom:sr1=stack64#16 # asm 2: fstpl >sr1=152(%rsp) fstpl 152(%rsp) # comment:fpstackfrombottom:r2=stack64#17 # asm 2: fstl >r2=160(%rsp) fstl 160(%rsp) # comment:fpstackfrombottom:sr2=stack64#18 # asm 2: fstpl >sr2=168(%rsp) fstpl 168(%rsp) # comment:fpstackfrombottom:r3=stack64#19 # asm 2: fstl >r3=176(%rsp) fstl 176(%rsp) # comment:fpstackfrombottom:sr3=stack64#20 # asm 2: fstpl >sr3=184(%rsp) fstpl 184(%rsp) # comment:fpstackfrombottom: # qhasm: h3 = 0 fldz # comment:fpstackfrombottom:m3=int64#1d # asm 2: movl 12(m3=%edi movl 12(%rsi),%edi # comment:fpstackfrombottom:m2=int64#4d # asm 2: movl 8(m2=%ecx movl 8(%rsi),%ecx # comment:fpstackfrombottom:m1=int64#5d # asm 2: movl 4(m1=%r8d movl 4(%rsi),%r8d # comment:fpstackfrombottom:m0=int64#6d # asm 2: movl 0(m0=%r9d movl 0(%rsi),%r9d # comment:fpstackfrombottom:m3=int64#1d # asm 2: movl 12(m3=%edi movl 12(%rsi),%edi # comment:fpstackfrombottom:m2=int64#4d # asm 2: movl 8(m2=%ecx movl 8(%rsi),%ecx # comment:fpstackfrombottom:m1=int64#5d # asm 2: movl 4(m1=%r8d movl 4(%rsi),%r8d # comment:fpstackfrombottom:m0=int64#6d # asm 2: movl 0(m0=%r9d movl 0(%rsi),%r9d # comment:fpstackfrombottom:lastchunk=stack128#1 # asm 2: movl $0,>lastchunk=0(%rsp) movl $0,0(%rsp) # comment:fpstackfrombottom:destination=int64#1 # asm 2: leaq destination=%rdi leaq 0(%rsp),%rdi # comment:fpstackfrombottom:numbytes=int64#4 # asm 2: mov numbytes=%rcx mov %rdx,%rcx # comment:fpstackfrombottom:m3=int64#1d # asm 2: movl 12+m3=%edi movl 12+0(%rsp),%edi # comment:fpstackfrombottom:m2=int64#2d # asm 2: movl 8+m2=%esi movl 8+0(%rsp),%esi # comment:fpstackfrombottom:m1=int64#3d # asm 2: movl 4+m1=%edx movl 4+0(%rsp),%edx # comment:fpstackfrombottom:m0=int64#4d # asm 2: movl m0=%ecx movl 0(%rsp),%ecx # comment:fpstackfrombottom:d0=stack64#10 # asm 2: fstpl >d0=104(%rsp) fstpl 104(%rsp) # comment:fpstackfrombottom:d1=stack64#11 # asm 2: fstpl >d1=112(%rsp) fstpl 112(%rsp) # comment:fpstackfrombottom:d2=stack64#12 # asm 2: fstpl >d2=120(%rsp) fstpl 120(%rsp) # comment:fpstackfrombottom:d3=stack64#13 # asm 2: fstpl >d3=128(%rsp) fstpl 128(%rsp) # comment:fpstackfrombottom: # qhasm: int64 f0 # qhasm: int64 f1 # qhasm: int64 f2 # qhasm: int64 f3 # qhasm: int64 f4 # qhasm: int64 g0 # qhasm: int64 g1 # qhasm: int64 g2 # qhasm: int64 g3 # qhasm: int64 f # qhasm: int64 notf # qhasm: stack64 f1_stack # qhasm: stack64 f2_stack # qhasm: stack64 f3_stack # qhasm: stack64 f4_stack # qhasm: stack64 g0_stack # qhasm: stack64 g1_stack # qhasm: stack64 g2_stack # qhasm: stack64 g3_stack # qhasm: g0 = top d0 # asm 1: movl g0=int64#1d # asm 2: movl g0=%edi 
movl 108(%rsp),%edi # qhasm: (uint32) g0 &= 63 # asm 1: and $63,g1=int64#2d # asm 2: movl g1=%esi movl 116(%rsp),%esi # qhasm: (uint32) g1 &= 63 # asm 1: and $63,g2=int64#3d # asm 2: movl g2=%edx movl 124(%rsp),%edx # qhasm: (uint32) g2 &= 63 # asm 1: and $63,g3=int64#4d # asm 2: movl g3=%ecx movl 132(%rsp),%ecx # qhasm: (uint32) g3 &= 63 # asm 1: and $63,f1=int64#5d # asm 2: movl f1=%r8d movl 112(%rsp),%r8d # qhasm: carry? (uint32) f1 += g0 # asm 1: add f1_stack=stack64#11 # asm 2: movq f1_stack=112(%rsp) movq %r8,112(%rsp) # qhasm: f2 = bottom d2 # asm 1: movl f2=int64#1d # asm 2: movl f2=%edi movl 120(%rsp),%edi # qhasm: carry? (uint32) f2 += g1 + carry # asm 1: adc f2_stack=stack64#12 # asm 2: movq f2_stack=120(%rsp) movq %rdi,120(%rsp) # qhasm: f3 = bottom d3 # asm 1: movl f3=int64#1d # asm 2: movl f3=%edi movl 128(%rsp),%edi # qhasm: carry? (uint32) f3 += g2 + carry # asm 1: adc f3_stack=stack64#13 # asm 2: movq f3_stack=128(%rsp) movq %rdi,128(%rsp) # qhasm: f4 = 0 # asm 1: mov $0,>f4=int64#1 # asm 2: mov $0,>f4=%rdi mov $0,%rdi # qhasm: carry? (uint32) f4 += g3 + carry # asm 1: adc f4_stack=stack64#14 # asm 2: movq f4_stack=136(%rsp) movq %rdi,136(%rsp) # qhasm: g0 = 5 # asm 1: mov $5,>g0=int64#1 # asm 2: mov $5,>g0=%rdi mov $5,%rdi # qhasm: f0 = bottom d0 # asm 1: movl f0=int64#2d # asm 2: movl f0=%esi movl 104(%rsp),%esi # qhasm: carry? (uint32) g0 += f0 # asm 1: add g0_stack=stack64#10 # asm 2: movq g0_stack=104(%rsp) movq %rdi,104(%rsp) # qhasm: g1 = 0 # asm 1: mov $0,>g1=int64#1 # asm 2: mov $0,>g1=%rdi mov $0,%rdi # qhasm: f1 = f1_stack # asm 1: movq f1=int64#3 # asm 2: movq f1=%rdx movq 112(%rsp),%rdx # qhasm: carry? (uint32) g1 += f1 + carry # asm 1: adc g1_stack=stack64#11 # asm 2: movq g1_stack=112(%rsp) movq %rdi,112(%rsp) # qhasm: g2 = 0 # asm 1: mov $0,>g2=int64#1 # asm 2: mov $0,>g2=%rdi mov $0,%rdi # qhasm: f2 = f2_stack # asm 1: movq f2=int64#4 # asm 2: movq f2=%rcx movq 120(%rsp),%rcx # qhasm: carry? (uint32) g2 += f2 + carry # asm 1: adc g2_stack=stack64#12 # asm 2: movq g2_stack=120(%rsp) movq %rdi,120(%rsp) # qhasm: g3 = 0 # asm 1: mov $0,>g3=int64#1 # asm 2: mov $0,>g3=%rdi mov $0,%rdi # qhasm: f3 = f3_stack # asm 1: movq f3=int64#5 # asm 2: movq f3=%r8 movq 128(%rsp),%r8 # qhasm: carry? (uint32) g3 += f3 + carry # asm 1: adc g3_stack=stack64#13 # asm 2: movq g3_stack=128(%rsp) movq %rdi,128(%rsp) # qhasm: f = 0xfffffffc # asm 1: mov $0xfffffffc,>f=int64#1 # asm 2: mov $0xfffffffc,>f=%rdi mov $0xfffffffc,%rdi # qhasm: f4 = f4_stack # asm 1: movq f4=int64#6 # asm 2: movq f4=%r9 movq 136(%rsp),%r9 # qhasm: carry? (uint32) f += f4 + carry # asm 1: adc >= 16 # asm 1: sar $16,notf=int64#6 # asm 2: mov notf=%r9 mov %rdi,%r9 # qhasm: (uint32) notf ^= 0xffffffff # asm 1: xor $0xffffffff,g0=int64#7 # asm 2: movq g0=%rax movq 104(%rsp),%rax # qhasm: g0 &= notf # asm 1: and g1=int64#7 # asm 2: movq g1=%rax movq 112(%rsp),%rax # qhasm: g1 &= notf # asm 1: and g2=int64#7 # asm 2: movq g2=%rax movq 120(%rsp),%rax # qhasm: g2 &= notf # asm 1: and g3=int64#1 # asm 2: movq g3=%rdi movq 128(%rsp),%rdi # qhasm: g3 &= notf # asm 1: and out=int64#1 # asm 2: movq out=%rdi movq 88(%rsp),%rdi # qhasm: k = k_stack # asm 1: movq k=int64#6 # asm 2: movq k=%r9 movq 96(%rsp),%r9 # qhasm: carry? 
(uint32) f0 += *(uint32 *) (k + 16) # asm 1: addl 16(r11_caller=int64#9 # asm 2: movq r11_caller=%r11 movq 32(%rsp),%r11 # qhasm: r12_caller = r12_stack # asm 1: movq r12_caller=int64#10 # asm 2: movq r12_caller=%r12 movq 40(%rsp),%r12 # qhasm: r13_caller = r13_stack # asm 1: movq r13_caller=int64#11 # asm 2: movq r13_caller=%r13 movq 48(%rsp),%r13 # qhasm: r14_caller = r14_stack # asm 1: movq r14_caller=int64#12 # asm 2: movq r14_caller=%r14 movq 56(%rsp),%r14 # qhasm: r15_caller = r15_stack # asm 1: movq r15_caller=int64#13 # asm 2: movq r15_caller=%r15 movq 64(%rsp),%r15 # qhasm: rbx_caller = rbx_stack # asm 1: movq rbx_caller=int64#14 # asm 2: movq rbx_caller=%rbx movq 72(%rsp),%rbx # qhasm: rbp_caller = rbp_stack # asm 1: movq rbp_caller=int64#15 # asm 2: movq rbp_caller=%rbp movq 80(%rsp),%rbp # qhasm: leave add %r11,%rsp xor %rax,%rax xor %rdx,%rdx ret curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/amd64/constants.s000066400000000000000000000045311150631715100264010ustar00rootroot00000000000000# version 20080913 # D. J. Bernstein # Public domain. .data .section .rodata .p2align 5 .globl _crypto_onetimeauth_poly1305_amd64_constants .globl crypto_onetimeauth_poly1305_amd64_constants .globl crypto_onetimeauth_poly1305_amd64_scale .globl crypto_onetimeauth_poly1305_amd64_two32 .globl crypto_onetimeauth_poly1305_amd64_two64 .globl crypto_onetimeauth_poly1305_amd64_two96 .globl crypto_onetimeauth_poly1305_amd64_alpha32 .globl crypto_onetimeauth_poly1305_amd64_alpha64 .globl crypto_onetimeauth_poly1305_amd64_alpha96 .globl crypto_onetimeauth_poly1305_amd64_alpha130 .globl crypto_onetimeauth_poly1305_amd64_doffset0 .globl crypto_onetimeauth_poly1305_amd64_doffset1 .globl crypto_onetimeauth_poly1305_amd64_doffset2 .globl crypto_onetimeauth_poly1305_amd64_doffset3 .globl crypto_onetimeauth_poly1305_amd64_doffset3minustwo128 .globl crypto_onetimeauth_poly1305_amd64_hoffset0 .globl crypto_onetimeauth_poly1305_amd64_hoffset1 .globl crypto_onetimeauth_poly1305_amd64_hoffset2 .globl crypto_onetimeauth_poly1305_amd64_hoffset3 .globl crypto_onetimeauth_poly1305_amd64_rounding _crypto_onetimeauth_poly1305_amd64_constants: crypto_onetimeauth_poly1305_amd64_constants: crypto_onetimeauth_poly1305_amd64_scale: .long 0x0,0x37f40000 crypto_onetimeauth_poly1305_amd64_two32: .long 0x0,0x41f00000 crypto_onetimeauth_poly1305_amd64_two64: .long 0x0,0x43f00000 crypto_onetimeauth_poly1305_amd64_two96: .long 0x0,0x45f00000 crypto_onetimeauth_poly1305_amd64_alpha32: .long 0x0,0x45e80000 crypto_onetimeauth_poly1305_amd64_alpha64: .long 0x0,0x47e80000 crypto_onetimeauth_poly1305_amd64_alpha96: .long 0x0,0x49e80000 crypto_onetimeauth_poly1305_amd64_alpha130: .long 0x0,0x4c080000 crypto_onetimeauth_poly1305_amd64_doffset0: .long 0x0,0x43300000 crypto_onetimeauth_poly1305_amd64_doffset1: .long 0x0,0x45300000 crypto_onetimeauth_poly1305_amd64_doffset2: .long 0x0,0x47300000 crypto_onetimeauth_poly1305_amd64_doffset3: .long 0x0,0x49300000 crypto_onetimeauth_poly1305_amd64_doffset3minustwo128: .long 0x0,0x492ffffe crypto_onetimeauth_poly1305_amd64_hoffset0: .long 0xfffffffb,0x43300001 crypto_onetimeauth_poly1305_amd64_hoffset1: .long 0xfffffffe,0x45300001 crypto_onetimeauth_poly1305_amd64_hoffset2: .long 0xfffffffe,0x47300001 crypto_onetimeauth_poly1305_amd64_hoffset3: .long 0xfffffffe,0x49300003 crypto_onetimeauth_poly1305_amd64_rounding: .byte 0x7f .byte 0x13 
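
Both constants.s listings (the amd64 one ending above and the x86 one further down in the archive) store each double-precision constant as a pair of raw 32-bit words, low half first. As a small sketch written for this note, not part of the tree, the helper below reinterprets such a pair as an IEEE-754 double so the magic numbers can be inspected; the three sample values in main are copied from the listing above, and their decoded meanings follow directly from the IEEE-754 encoding.

/*
 * Hypothetical helper, not part of the NaCl tree: decodes the raw
 * ".long low,high" pairs from constants.s into the doubles the qhasm
 * code loads at run time.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static double decode(uint32_t low, uint32_t high)
{
  uint64_t bits = ((uint64_t) high << 32) | low;
  double d;
  memcpy(&d, &bits, sizeof d);   /* reinterpret the bit pattern as a double */
  return d;
}

int main(void)
{
  printf("doffset0 = %g\n", decode(0x0, 0x43300000));  /* 2^52, the usual int<->double offset trick */
  printf("alpha130 = %g\n", decode(0x0, 0x4c080000));  /* 1.5 * 2^193, rounding mask for the 2^130 limb boundary */
  printf("scale    = %g\n", decode(0x0, 0x37f40000));  /* 5 * 2^-130, folds high limbs down since 2^130 = 5 mod p */
  return 0;
}
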
curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/amd64/verify.c

#include "crypto_verify_16.h"
#include "crypto_onetimeauth.h"

int crypto_onetimeauth_verify(const unsigned char *h,const unsigned char *in,unsigned long long inlen,const unsigned char *k)
{
  unsigned char correct[16];
  crypto_onetimeauth(correct,in,inlen,k);
  return crypto_verify_16(h,correct);
}

curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/checksum

e836d5ca58cf673fca2b4910f23f3990

curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/ref/api.h

#define CRYPTO_BYTES 16
#define CRYPTO_KEYBYTES 32

curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/ref/auth.c

/*
20080912
D. J. Bernstein
Public domain.
*/

#include "crypto_onetimeauth.h"

static void add(unsigned int h[17],const unsigned int c[17])
{
  unsigned int j;
  unsigned int u;
  u = 0;
  for (j = 0;j < 17;++j) { u += h[j] + c[j]; h[j] = u & 255; u >>= 8; }
}

static void squeeze(unsigned int h[17])
{
  unsigned int j;
  unsigned int u;
  u = 0;
  for (j = 0;j < 16;++j) { u += h[j]; h[j] = u & 255; u >>= 8; }
  u += h[16]; h[16] = u & 3;
  u = 5 * (u >> 2);
  for (j = 0;j < 16;++j) { u += h[j]; h[j] = u & 255; u >>= 8; }
  u += h[16]; h[16] = u;
}

static const unsigned int minusp[17] = {
  5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 252
} ;

static void freeze(unsigned int h[17])
{
  unsigned int horig[17];
  unsigned int j;
  unsigned int negative;
  for (j = 0;j < 17;++j) horig[j] = h[j];
  add(h,minusp);
  negative = -(h[16] >> 7);
  for (j = 0;j < 17;++j) h[j] ^= negative & (horig[j] ^ h[j]);
}

static void mulmod(unsigned int h[17],const unsigned int r[17])
{
  unsigned int hr[17];
  unsigned int i;
  unsigned int j;
  unsigned int u;

  for (i = 0;i < 17;++i) {
    u = 0;
    for (j = 0;j <= i;++j) u += h[j] * r[i - j];
    for (j = i + 1;j < 17;++j) u += 320 * h[j] * r[i + 17 - j];
    hr[i] = u;
  }
  for (i = 0;i < 17;++i) h[i] = hr[i];
  squeeze(h);
}

int crypto_onetimeauth(unsigned char *out,const unsigned char *in,unsigned long long inlen,const unsigned char *k)
{
  unsigned int j;
  unsigned int r[17];
  unsigned int h[17];
  unsigned int c[17];

  r[0] = k[0];
  r[1] = k[1];
  r[2] = k[2];
  r[3] = k[3] & 15;
  r[4] = k[4] & 252;
  r[5] = k[5];
  r[6] = k[6];
  r[7] = k[7] & 15;
  r[8] = k[8] & 252;
  r[9] = k[9];
  r[10] = k[10];
  r[11] = k[11] & 15;
  r[12] = k[12] & 252;
  r[13] = k[13];
  r[14] = k[14];
  r[15] = k[15] & 15;
  r[16] = 0;

  for (j = 0;j < 17;++j) h[j] = 0;

  while (inlen > 0) {
    for (j = 0;j < 17;++j) c[j] = 0;
    for (j = 0;(j < 16) && (j < inlen);++j) c[j] = in[j];
    c[j] = 1;
    in += j; inlen -= j;
    add(h,c);
    mulmod(h,r);
  }

  freeze(h);

  for (j = 0;j < 16;++j) c[j] = k[j + 16];
  c[16] = 0;
  add(h,c);
  for (j = 0;j < 16;++j) out[j] = h[j];
  return 0;
}

curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/ref/verify.c

#include "crypto_verify_16.h"
#include "crypto_onetimeauth.h"

int crypto_onetimeauth_verify(const unsigned char *h,const unsigned char *in,unsigned long long inlen,const unsigned char *k)
{
  unsigned char correct[16];
  crypto_onetimeauth(correct,in,inlen,k);
  return crypto_verify_16(h,correct);
}
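
Each verify.c in this directory recomputes the expected tag and hands the comparison to crypto_verify_16, whose source lives elsewhere in the archive; the point of that indirection is that the 16-byte comparison must not reveal, through timing, how many bytes agreed. The function below is only an illustrative sketch of such a constant-time comparison, written for this note under that assumption, and is not the crypto_verify_16 implementation shipped in the tree.

/*
 * Illustrative sketch only, not NaCl's crypto_verify_16: compares two
 * 16-byte tags in time independent of their contents, returning 0 on a
 * match and -1 otherwise.
 */
static int verify16_sketch(const unsigned char *x, const unsigned char *y)
{
  unsigned int differentbits = 0;
  int i;
  for (i = 0; i < 16; ++i)
    differentbits |= x[i] ^ y[i];          /* accumulate every differing bit */
  /* differentbits is 0..255; map 0 -> 0 and any nonzero value -> -1 without branching */
  return (1 & ((differentbits - 1) >> 8)) - 1;
}

A plain memcmp would typically return at the first mismatching byte, which is exactly the timing signal this construction avoids.
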
curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/selected000066400000000000000000000000001150631715100247640ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/used000066400000000000000000000000001150631715100241340ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/x86/000077500000000000000000000000001150631715100237105ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/x86/api.h000066400000000000000000000000631150631715100246310ustar00rootroot00000000000000#define CRYPTO_BYTES 16 #define CRYPTO_KEYBYTES 32 curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/x86/auth.s000066400000000000000000002365521150631715100250520ustar00rootroot00000000000000 # qhasm: stack32 arg_out # qhasm: stack32 arg_m # qhasm: stack32 arg_l # qhasm: stack32 arg_ltop # qhasm: stack32 arg_k # qhasm: input arg_out # qhasm: input arg_m # qhasm: input arg_l # qhasm: input arg_ltop # qhasm: input arg_k # qhasm: int32 eax # qhasm: int32 ebx # qhasm: int32 esi # qhasm: int32 edi # qhasm: int32 ebp # qhasm: caller eax # qhasm: caller ebx # qhasm: caller esi # qhasm: caller edi # qhasm: caller ebp # qhasm: stack32 eax_stack # qhasm: stack32 ebx_stack # qhasm: stack32 esi_stack # qhasm: stack32 edi_stack # qhasm: stack32 ebp_stack # qhasm: int32 out # qhasm: stack32 out_stack # qhasm: int32 k # qhasm: stack32 k_stack # qhasm: int32 m # qhasm: int32 l # qhasm: int32 m0 # qhasm: int32 m1 # qhasm: int32 m2 # qhasm: int32 m3 # qhasm: float80 a0 # qhasm: float80 a1 # qhasm: float80 a2 # qhasm: float80 a3 # qhasm: float80 h0 # qhasm: float80 h1 # qhasm: float80 h2 # qhasm: float80 h3 # qhasm: float80 x0 # qhasm: float80 x1 # qhasm: float80 x2 # qhasm: float80 x3 # qhasm: float80 y0 # qhasm: float80 y1 # qhasm: float80 y2 # qhasm: float80 y3 # qhasm: float80 r0x0 # qhasm: float80 r1x0 # qhasm: float80 r2x0 # qhasm: float80 r3x0 # qhasm: float80 r0x1 # qhasm: float80 r1x1 # qhasm: float80 r2x1 # qhasm: float80 sr3x1 # qhasm: float80 r0x2 # qhasm: float80 r1x2 # qhasm: float80 sr2x2 # qhasm: float80 sr3x2 # qhasm: float80 r0x3 # qhasm: float80 sr1x3 # qhasm: float80 sr2x3 # qhasm: float80 sr3x3 # qhasm: stack64 d0 # qhasm: stack64 d1 # qhasm: stack64 d2 # qhasm: stack64 d3 # qhasm: stack64 r0 # qhasm: stack64 r1 # qhasm: stack64 r2 # qhasm: stack64 r3 # qhasm: stack64 sr1 # qhasm: stack64 sr2 # qhasm: stack64 sr3 # qhasm: enter crypto_onetimeauth_poly1305_x86 stackaligned4096 crypto_onetimeauth_poly1305_x86_constants .text .p2align 5 .globl _crypto_onetimeauth_poly1305_x86 .globl crypto_onetimeauth_poly1305_x86 _crypto_onetimeauth_poly1305_x86: crypto_onetimeauth_poly1305_x86: mov %esp,%eax sub $crypto_onetimeauth_poly1305_x86_constants,%eax and $4095,%eax add $192,%eax sub %eax,%esp # qhasm: eax_stack = eax # asm 1: movl eax_stack=stack32#1 # asm 2: movl eax_stack=0(%esp) movl %eax,0(%esp) # qhasm: ebx_stack = ebx # asm 1: movl ebx_stack=stack32#2 # asm 2: movl ebx_stack=4(%esp) movl %ebx,4(%esp) # qhasm: esi_stack = esi # asm 1: movl esi_stack=stack32#3 # asm 2: movl esi_stack=8(%esp) movl %esi,8(%esp) # qhasm: edi_stack = edi # asm 1: movl edi_stack=stack32#4 # asm 2: movl edi_stack=12(%esp) movl %edi,12(%esp) # qhasm: ebp_stack = ebp # asm 1: movl ebp_stack=stack32#5 # asm 2: movl ebp_stack=16(%esp) movl %ebp,16(%esp) # qhasm: round *(uint16 *) &crypto_onetimeauth_poly1305_x86_rounding fldcw crypto_onetimeauth_poly1305_x86_rounding # qhasm: k = arg_k # asm 1: movl k=int32#3 # asm 2: movl k=%edx movl 20(%esp,%eax),%edx # 
qhasm: m0 = *(uint32 *) (k + 0) # asm 1: movl 0(m0=int32#2 # asm 2: movl 0(m0=%ecx movl 0(%edx),%ecx # qhasm: m1 = *(uint32 *) (k + 4) # asm 1: movl 4(m1=int32#4 # asm 2: movl 4(m1=%ebx movl 4(%edx),%ebx # qhasm: m2 = *(uint32 *) (k + 8) # asm 1: movl 8(m2=int32#5 # asm 2: movl 8(m2=%esi movl 8(%edx),%esi # qhasm: m3 = *(uint32 *) (k + 12) # asm 1: movl 12(m3=int32#6 # asm 2: movl 12(m3=%edi movl 12(%edx),%edi # qhasm: d0 top = 0x43300000 # asm 1: movl $0x43300000,>d0=stack64#1 # asm 2: movl $0x43300000,>d0=100(%esp) movl $0x43300000,100(%esp) # qhasm: d1 top = 0x45300000 # asm 1: movl $0x45300000,>d1=stack64#2 # asm 2: movl $0x45300000,>d1=108(%esp) movl $0x45300000,108(%esp) # qhasm: d2 top = 0x47300000 # asm 1: movl $0x47300000,>d2=stack64#3 # asm 2: movl $0x47300000,>d2=116(%esp) movl $0x47300000,116(%esp) # qhasm: d3 top = 0x49300000 # asm 1: movl $0x49300000,>d3=stack64#4 # asm 2: movl $0x49300000,>d3=124(%esp) movl $0x49300000,124(%esp) # qhasm: m0 &= 0x0fffffff # asm 1: and $0x0fffffff,r0=stack64#5 # asm 2: fstpl >r0=128(%esp) fstpl 128(%esp) # comment:fpstackfrombottom:r1=stack64#6 # asm 2: fstl >r1=136(%esp) fstl 136(%esp) # comment:fpstackfrombottom:sr1=stack64#7 # asm 2: fstpl >sr1=144(%esp) fstpl 144(%esp) # comment:fpstackfrombottom:r2=stack64#8 # asm 2: fstl >r2=152(%esp) fstl 152(%esp) # comment:fpstackfrombottom:sr2=stack64#9 # asm 2: fstpl >sr2=160(%esp) fstpl 160(%esp) # comment:fpstackfrombottom:r3=stack64#10 # asm 2: fstl >r3=168(%esp) fstl 168(%esp) # comment:fpstackfrombottom:sr3=stack64#11 # asm 2: fstpl >sr3=176(%esp) fstpl 176(%esp) # comment:fpstackfrombottom: # qhasm: out = arg_out # asm 1: movl out=int32#4 # asm 2: movl out=%ebx movl 4(%esp,%eax),%ebx # qhasm: m = arg_m # asm 1: movl m=int32#5 # asm 2: movl m=%esi movl 8(%esp,%eax),%esi # qhasm: l = arg_l # asm 1: movl l=int32#2 # asm 2: movl l=%ecx movl 12(%esp,%eax),%ecx # qhasm: h3 = 0 fldz # comment:fpstackfrombottom:k_stack=stack32#6 # asm 2: movl k_stack=20(%esp) movl %edx,20(%esp) # comment:fpstackfrombottom:out_stack=stack32#7 # asm 2: movl out_stack=24(%esp) movl %ebx,24(%esp) # comment:fpstackfrombottom:m3=int32#1 # asm 2: movl 12(m3=%eax movl 12(%esi),%eax # comment:fpstackfrombottom:m2=int32#3 # asm 2: movl 8(m2=%edx movl 8(%esi),%edx # comment:fpstackfrombottom:m1=int32#4 # asm 2: movl 4(m1=%ebx movl 4(%esi),%ebx # comment:fpstackfrombottom:m0=int32#6 # asm 2: movl 0(m0=%edi movl 0(%esi),%edi # comment:fpstackfrombottom:m3=int32#1 # asm 2: movl 12(m3=%eax movl 12(%esi),%eax # comment:fpstackfrombottom:m2=int32#3 # asm 2: movl 8(m2=%edx movl 8(%esi),%edx # comment:fpstackfrombottom:m1=int32#4 # asm 2: movl 4(m1=%ebx movl 4(%esi),%ebx # comment:fpstackfrombottom:m0=int32#6 # asm 2: movl 0(m0=%edi movl 0(%esi),%edi # comment:fpstackfrombottom:lastchunk=stack128#1 # asm 2: movl $0,>lastchunk=64(%esp) movl $0,64(%esp) # comment:fpstackfrombottom:destination=int32#6 # asm 2: leal destination=%edi leal 64(%esp),%edi # comment:fpstackfrombottom:m3=int32#1 # asm 2: movl 12+m3=%eax movl 12+64(%esp),%eax # comment:fpstackfrombottom:m2=int32#2 # asm 2: movl 8+m2=%ecx movl 8+64(%esp),%ecx # comment:fpstackfrombottom:m1=int32#3 # asm 2: movl 4+m1=%edx movl 4+64(%esp),%edx # comment:fpstackfrombottom:m0=int32#4 # asm 2: movl m0=%ebx movl 64(%esp),%ebx # comment:fpstackfrombottom:d0=stack64#1 # asm 2: fstpl >d0=96(%esp) fstpl 96(%esp) # comment:fpstackfrombottom:d1=stack64#2 # asm 2: fstpl >d1=104(%esp) fstpl 104(%esp) # comment:fpstackfrombottom:d2=stack64#3 # asm 2: fstpl >d2=112(%esp) fstpl 112(%esp) # 
comment:fpstackfrombottom:d3=stack64#4 # asm 2: fstpl >d3=120(%esp) fstpl 120(%esp) # comment:fpstackfrombottom: # qhasm: int32 f0 # qhasm: int32 f1 # qhasm: int32 f2 # qhasm: int32 f3 # qhasm: int32 f4 # qhasm: int32 g0 # qhasm: int32 g1 # qhasm: int32 g2 # qhasm: int32 g3 # qhasm: int32 f # qhasm: int32 notf # qhasm: stack32 f1_stack # qhasm: stack32 f2_stack # qhasm: stack32 f3_stack # qhasm: stack32 f4_stack # qhasm: stack32 g0_stack # qhasm: stack32 g1_stack # qhasm: stack32 g2_stack # qhasm: stack32 g3_stack # qhasm: g0 = top d0 # asm 1: movl g0=int32#1 # asm 2: movl g0=%eax movl 100(%esp),%eax # qhasm: g0 &= 63 # asm 1: and $63,g1=int32#2 # asm 2: movl g1=%ecx movl 108(%esp),%ecx # qhasm: g1 &= 63 # asm 1: and $63,g2=int32#3 # asm 2: movl g2=%edx movl 116(%esp),%edx # qhasm: g2 &= 63 # asm 1: and $63,g3=int32#4 # asm 2: movl g3=%ebx movl 124(%esp),%ebx # qhasm: g3 &= 63 # asm 1: and $63,f1=int32#5 # asm 2: movl f1=%esi movl 104(%esp),%esi # qhasm: carry? f1 += g0 # asm 1: addl f1_stack=stack32#8 # asm 2: movl f1_stack=28(%esp) movl %esi,28(%esp) # qhasm: f2 = bottom d2 # asm 1: movl f2=int32#1 # asm 2: movl f2=%eax movl 112(%esp),%eax # qhasm: carry? f2 += g1 + carry # asm 1: adcl f2_stack=stack32#9 # asm 2: movl f2_stack=32(%esp) movl %eax,32(%esp) # qhasm: f3 = bottom d3 # asm 1: movl f3=int32#1 # asm 2: movl f3=%eax movl 120(%esp),%eax # qhasm: carry? f3 += g2 + carry # asm 1: adcl f3_stack=stack32#10 # asm 2: movl f3_stack=36(%esp) movl %eax,36(%esp) # qhasm: f4 = 0 # asm 1: mov $0,>f4=int32#1 # asm 2: mov $0,>f4=%eax mov $0,%eax # qhasm: carry? f4 += g3 + carry # asm 1: adcl f4_stack=stack32#11 # asm 2: movl f4_stack=40(%esp) movl %eax,40(%esp) # qhasm: g0 = 5 # asm 1: mov $5,>g0=int32#1 # asm 2: mov $5,>g0=%eax mov $5,%eax # qhasm: f0 = bottom d0 # asm 1: movl f0=int32#2 # asm 2: movl f0=%ecx movl 96(%esp),%ecx # qhasm: carry? g0 += f0 # asm 1: addl g0_stack=stack32#12 # asm 2: movl g0_stack=44(%esp) movl %eax,44(%esp) # qhasm: g1 = 0 # asm 1: mov $0,>g1=int32#1 # asm 2: mov $0,>g1=%eax mov $0,%eax # qhasm: f1 = f1_stack # asm 1: movl f1=int32#3 # asm 2: movl f1=%edx movl 28(%esp),%edx # qhasm: carry? g1 += f1 + carry # asm 1: adcl g1_stack=stack32#8 # asm 2: movl g1_stack=28(%esp) movl %eax,28(%esp) # qhasm: g2 = 0 # asm 1: mov $0,>g2=int32#1 # asm 2: mov $0,>g2=%eax mov $0,%eax # qhasm: f2 = f2_stack # asm 1: movl f2=int32#4 # asm 2: movl f2=%ebx movl 32(%esp),%ebx # qhasm: carry? g2 += f2 + carry # asm 1: adcl g2_stack=stack32#9 # asm 2: movl g2_stack=32(%esp) movl %eax,32(%esp) # qhasm: g3 = 0 # asm 1: mov $0,>g3=int32#1 # asm 2: mov $0,>g3=%eax mov $0,%eax # qhasm: f3 = f3_stack # asm 1: movl f3=int32#5 # asm 2: movl f3=%esi movl 36(%esp),%esi # qhasm: carry? g3 += f3 + carry # asm 1: adcl g3_stack=stack32#10 # asm 2: movl g3_stack=36(%esp) movl %eax,36(%esp) # qhasm: f = 0xfffffffc # asm 1: mov $0xfffffffc,>f=int32#1 # asm 2: mov $0xfffffffc,>f=%eax mov $0xfffffffc,%eax # qhasm: f4 = f4_stack # asm 1: movl f4=int32#6 # asm 2: movl f4=%edi movl 40(%esp),%edi # qhasm: carry? 
f += f4 + carry # asm 1: adcl >= 16 # asm 1: sar $16,notf=int32#6 # asm 2: mov notf=%edi mov %eax,%edi # qhasm: notf ^= 0xffffffff # asm 1: xor $0xffffffff,g0=int32#7 # asm 2: movl g0=%ebp movl 44(%esp),%ebp # qhasm: g0 &= notf # asm 1: andl g1=int32#7 # asm 2: movl g1=%ebp movl 28(%esp),%ebp # qhasm: g1 &= notf # asm 1: andl g2=int32#7 # asm 2: movl g2=%ebp movl 32(%esp),%ebp # qhasm: g2 &= notf # asm 1: andl g3=int32#1 # asm 2: movl g3=%eax movl 36(%esp),%eax # qhasm: g3 &= notf # asm 1: andl k=int32#1 # asm 2: movl k=%eax movl 20(%esp),%eax # qhasm: carry? f0 += *(uint32 *) (k + 16) # asm 1: addl 16(out=int32#1 # asm 2: movl out=%eax movl 24(%esp),%eax # qhasm: *(uint32 *) (out + 0) = f0 # asm 1: movl eax=int32#1 # asm 2: movl eax=%eax movl 0(%esp),%eax # qhasm: ebx = ebx_stack # asm 1: movl ebx=int32#4 # asm 2: movl ebx=%ebx movl 4(%esp),%ebx # qhasm: esi = esi_stack # asm 1: movl esi=int32#5 # asm 2: movl esi=%esi movl 8(%esp),%esi # qhasm: edi = edi_stack # asm 1: movl edi=int32#6 # asm 2: movl edi=%edi movl 12(%esp),%edi # qhasm: ebp = ebp_stack # asm 1: movl ebp=int32#7 # asm 2: movl ebp=%ebp movl 16(%esp),%ebp # qhasm: leave add %eax,%esp xor %eax,%eax ret curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/x86/constants.s000066400000000000000000000044111150631715100261100ustar00rootroot00000000000000# version 20080912 # D. J. Bernstein # Public domain. .data .section .rodata .p2align 5 .globl _crypto_onetimeauth_poly1305_x86_constants .globl crypto_onetimeauth_poly1305_x86_constants .globl crypto_onetimeauth_poly1305_x86_scale .globl crypto_onetimeauth_poly1305_x86_two32 .globl crypto_onetimeauth_poly1305_x86_two64 .globl crypto_onetimeauth_poly1305_x86_two96 .globl crypto_onetimeauth_poly1305_x86_alpha32 .globl crypto_onetimeauth_poly1305_x86_alpha64 .globl crypto_onetimeauth_poly1305_x86_alpha96 .globl crypto_onetimeauth_poly1305_x86_alpha130 .globl crypto_onetimeauth_poly1305_x86_doffset0 .globl crypto_onetimeauth_poly1305_x86_doffset1 .globl crypto_onetimeauth_poly1305_x86_doffset2 .globl crypto_onetimeauth_poly1305_x86_doffset3 .globl crypto_onetimeauth_poly1305_x86_doffset3minustwo128 .globl crypto_onetimeauth_poly1305_x86_hoffset0 .globl crypto_onetimeauth_poly1305_x86_hoffset1 .globl crypto_onetimeauth_poly1305_x86_hoffset2 .globl crypto_onetimeauth_poly1305_x86_hoffset3 .globl crypto_onetimeauth_poly1305_x86_rounding _crypto_onetimeauth_poly1305_x86_constants: crypto_onetimeauth_poly1305_x86_constants: crypto_onetimeauth_poly1305_x86_scale: .long 0x0,0x37f40000 crypto_onetimeauth_poly1305_x86_two32: .long 0x0,0x41f00000 crypto_onetimeauth_poly1305_x86_two64: .long 0x0,0x43f00000 crypto_onetimeauth_poly1305_x86_two96: .long 0x0,0x45f00000 crypto_onetimeauth_poly1305_x86_alpha32: .long 0x0,0x45e80000 crypto_onetimeauth_poly1305_x86_alpha64: .long 0x0,0x47e80000 crypto_onetimeauth_poly1305_x86_alpha96: .long 0x0,0x49e80000 crypto_onetimeauth_poly1305_x86_alpha130: .long 0x0,0x4c080000 crypto_onetimeauth_poly1305_x86_doffset0: .long 0x0,0x43300000 crypto_onetimeauth_poly1305_x86_doffset1: .long 0x0,0x45300000 crypto_onetimeauth_poly1305_x86_doffset2: .long 0x0,0x47300000 crypto_onetimeauth_poly1305_x86_doffset3: .long 0x0,0x49300000 crypto_onetimeauth_poly1305_x86_doffset3minustwo128: .long 0x0,0x492ffffe crypto_onetimeauth_poly1305_x86_hoffset0: .long 0xfffffffb,0x43300001 crypto_onetimeauth_poly1305_x86_hoffset1: .long 0xfffffffe,0x45300001 crypto_onetimeauth_poly1305_x86_hoffset2: .long 0xfffffffe,0x47300001 crypto_onetimeauth_poly1305_x86_hoffset3: .long 
0xfffffffe,0x49300003 crypto_onetimeauth_poly1305_x86_rounding: .byte 0x7f .byte 0x13 curvedns-curvedns-0.87/nacl/crypto_onetimeauth/poly1305/x86/verify.c000066400000000000000000000004561150631715100253650ustar00rootroot00000000000000#include "crypto_verify_16.h" #include "crypto_onetimeauth.h" int crypto_onetimeauth_verify(const unsigned char *h,const unsigned char *in,unsigned long long inlen,const unsigned char *k) { unsigned char correct[16]; crypto_onetimeauth(correct,in,inlen,k); return crypto_verify_16(h,correct); } curvedns-curvedns-0.87/nacl/crypto_onetimeauth/try.c000066400000000000000000000104321150631715100227510ustar00rootroot00000000000000/* * crypto_onetimeauth/try.c version 20090118 * D. J. Bernstein * Public domain. */ #include "crypto_hash_sha256.h" #include "crypto_onetimeauth.h" extern unsigned char *alignedcalloc(unsigned long long); const char *primitiveimplementation = crypto_onetimeauth_IMPLEMENTATION; #define MAXTEST_BYTES 10000 #define CHECKSUM_BYTES 4096 #define TUNE_BYTES 1536 static unsigned char *h; static unsigned char *m; static unsigned char *k; static unsigned char *h2; static unsigned char *m2; static unsigned char *k2; void preallocate(void) { } void allocate(void) { h = alignedcalloc(crypto_onetimeauth_BYTES); m = alignedcalloc(MAXTEST_BYTES); k = alignedcalloc(crypto_onetimeauth_KEYBYTES); h2 = alignedcalloc(crypto_onetimeauth_BYTES); m2 = alignedcalloc(MAXTEST_BYTES + crypto_onetimeauth_BYTES); k2 = alignedcalloc(crypto_onetimeauth_KEYBYTES + crypto_onetimeauth_BYTES); } void predoit(void) { } void doit(void) { crypto_onetimeauth(h,m,TUNE_BYTES,k); crypto_onetimeauth_verify(h,m,TUNE_BYTES,k); } char checksum[crypto_onetimeauth_BYTES * 2 + 1]; const char *checksum_compute(void) { long long i; long long j; for (i = 0;i < CHECKSUM_BYTES;++i) { long long mlen = i; long long klen = crypto_onetimeauth_KEYBYTES; long long hlen = crypto_onetimeauth_BYTES; for (j = -16;j < 0;++j) h[j] = random(); for (j = -16;j < 0;++j) k[j] = random(); for (j = -16;j < 0;++j) m[j] = random(); for (j = hlen;j < hlen + 16;++j) h[j] = random(); for (j = klen;j < klen + 16;++j) k[j] = random(); for (j = mlen;j < mlen + 16;++j) m[j] = random(); for (j = -16;j < hlen + 16;++j) h2[j] = h[j]; for (j = -16;j < klen + 16;++j) k2[j] = k[j]; for (j = -16;j < mlen + 16;++j) m2[j] = m[j]; if (crypto_onetimeauth(h,m,mlen,k) != 0) return "crypto_onetimeauth returns nonzero"; for (j = -16;j < klen + 16;++j) if (k[j] != k2[j]) return "crypto_onetimeauth overwrites k"; for (j = -16;j < mlen + 16;++j) if (m[j] != m2[j]) return "crypto_onetimeauth overwrites m"; for (j = -16;j < 0;++j) if (h[j] != h2[j]) return "crypto_onetimeauth writes before output"; for (j = hlen;j < hlen + 16;++j) if (h[j] != h2[j]) return "crypto_onetimeauth writes after output"; for (j = -16;j < 0;++j) h[j] = random(); for (j = -16;j < 0;++j) k[j] = random(); for (j = -16;j < 0;++j) m[j] = random(); for (j = hlen;j < hlen + 16;++j) h[j] = random(); for (j = klen;j < klen + 16;++j) k[j] = random(); for (j = mlen;j < mlen + 16;++j) m[j] = random(); for (j = -16;j < hlen + 16;++j) h2[j] = h[j]; for (j = -16;j < klen + 16;++j) k2[j] = k[j]; for (j = -16;j < mlen + 16;++j) m2[j] = m[j]; if (crypto_onetimeauth(m2,m2,mlen,k) != 0) return "crypto_onetimeauth returns nonzero"; for (j = 0;j < hlen;++j) if (m2[j] != h[j]) return "crypto_onetimeauth does not handle m overlap"; for (j = 0;j < hlen;++j) m2[j] = m[j]; if (crypto_onetimeauth(k2,m2,mlen,k2) != 0) return "crypto_onetimeauth returns nonzero"; for (j = 0;j < hlen;++j) if 
(k2[j] != h[j]) return "crypto_onetimeauth does not handle k overlap"; for (j = 0;j < hlen;++j) k2[j] = k[j]; if (crypto_onetimeauth_verify(h,m,mlen,k) != 0) return "crypto_onetimeauth_verify returns nonzero"; for (j = -16;j < hlen + 16;++j) if (h[j] != h2[j]) return "crypto_onetimeauth overwrites h"; for (j = -16;j < klen + 16;++j) if (k[j] != k2[j]) return "crypto_onetimeauth overwrites k"; for (j = -16;j < mlen + 16;++j) if (m[j] != m2[j]) return "crypto_onetimeauth overwrites m"; crypto_hash_sha256(h2,h,hlen); for (j = 0;j < klen;++j) k[j] ^= h2[j % 32]; if (crypto_onetimeauth(h,m,mlen,k) != 0) return "crypto_onetimeauth returns nonzero"; if (crypto_onetimeauth_verify(h,m,mlen,k) != 0) return "crypto_onetimeauth_verify returns nonzero"; crypto_hash_sha256(h2,h,hlen); for (j = 0;j < mlen;++j) m[j] ^= h2[j % 32]; m[mlen] = h2[0]; } if (crypto_onetimeauth(h,m,CHECKSUM_BYTES,k) != 0) return "crypto_onetimeauth returns nonzero"; if (crypto_onetimeauth_verify(h,m,CHECKSUM_BYTES,k) != 0) return "crypto_onetimeauth_verify returns nonzero"; for (i = 0;i < crypto_onetimeauth_BYTES;++i) { checksum[2 * i] = "0123456789abcdef"[15 & (h[i] >> 4)]; checksum[2 * i + 1] = "0123456789abcdef"[15 & h[i]]; } checksum[2 * i] = 0; return 0; } curvedns-curvedns-0.87/nacl/crypto_onetimeauth/wrapper-auth.cpp000066400000000000000000000006331150631715100251140ustar00rootroot00000000000000#include using std::string; #include "crypto_onetimeauth.h" string crypto_onetimeauth(const string &m,const string &k) { if (k.size() != crypto_onetimeauth_KEYBYTES) throw "incorrect key length"; unsigned char a[crypto_onetimeauth_BYTES]; crypto_onetimeauth(a,(const unsigned char *) m.c_str(),m.size(),(const unsigned char *) k.c_str()); return string((char *) a,crypto_onetimeauth_BYTES); } curvedns-curvedns-0.87/nacl/crypto_onetimeauth/wrapper-verify.cpp000066400000000000000000000010221150631715100254500ustar00rootroot00000000000000#include using std::string; #include "crypto_onetimeauth.h" void crypto_onetimeauth_verify(const string &a,const string &m,const string &k) { if (k.size() != crypto_onetimeauth_KEYBYTES) throw "incorrect key length"; if (a.size() != crypto_onetimeauth_BYTES) throw "incorrect authenticator length"; if (crypto_onetimeauth_verify( (const unsigned char *) a.c_str(), (const unsigned char *) m.c_str(),m.size(), (const unsigned char *) k.c_str()) == 0) return; throw "invalid authenticator"; } curvedns-curvedns-0.87/nacl/crypto_scalarmult/000077500000000000000000000000001150631715100216145ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/000077500000000000000000000000001150631715100233465ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/athlon/000077500000000000000000000000001150631715100246335ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/athlon/api.h000066400000000000000000000000661150631715100255570ustar00rootroot00000000000000#define CRYPTO_BYTES 32 #define CRYPTO_SCALARBYTES 32 curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/athlon/base.c000066400000000000000000000002651150631715100257140ustar00rootroot00000000000000#include "crypto_scalarmult.h" static char basepoint[32] = {9}; int crypto_scalarmult_base(unsigned char *q,const unsigned char *n) { return crypto_scalarmult(q,n,basepoint); } curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/athlon/const.s000066400000000000000000000105241150631715100261470ustar00rootroot00000000000000.data .section .rodata .p2align 5 .globl 
crypto_scalarmult_curve25519_athlon_scale .globl crypto_scalarmult_curve25519_athlon_121665 .globl crypto_scalarmult_curve25519_athlon_alpha26 .globl crypto_scalarmult_curve25519_athlon_alpha51 .globl crypto_scalarmult_curve25519_athlon_alpha77 .globl crypto_scalarmult_curve25519_athlon_alpha102 .globl crypto_scalarmult_curve25519_athlon_alpha128 .globl crypto_scalarmult_curve25519_athlon_alpha153 .globl crypto_scalarmult_curve25519_athlon_alpha179 .globl crypto_scalarmult_curve25519_athlon_alpha204 .globl crypto_scalarmult_curve25519_athlon_alpha230 .globl crypto_scalarmult_curve25519_athlon_alpha255 .globl crypto_scalarmult_curve25519_athlon_in0offset .globl crypto_scalarmult_curve25519_athlon_in1offset .globl crypto_scalarmult_curve25519_athlon_in2offset .globl crypto_scalarmult_curve25519_athlon_in3offset .globl crypto_scalarmult_curve25519_athlon_in4offset .globl crypto_scalarmult_curve25519_athlon_in5offset .globl crypto_scalarmult_curve25519_athlon_in6offset .globl crypto_scalarmult_curve25519_athlon_in7offset .globl crypto_scalarmult_curve25519_athlon_in8offset .globl crypto_scalarmult_curve25519_athlon_in9offset .globl crypto_scalarmult_curve25519_athlon_out0offset .globl crypto_scalarmult_curve25519_athlon_out1offset .globl crypto_scalarmult_curve25519_athlon_out2offset .globl crypto_scalarmult_curve25519_athlon_out3offset .globl crypto_scalarmult_curve25519_athlon_out4offset .globl crypto_scalarmult_curve25519_athlon_out5offset .globl crypto_scalarmult_curve25519_athlon_out6offset .globl crypto_scalarmult_curve25519_athlon_out7offset .globl crypto_scalarmult_curve25519_athlon_out8offset .globl crypto_scalarmult_curve25519_athlon_out9offset .globl crypto_scalarmult_curve25519_athlon_two0 .globl crypto_scalarmult_curve25519_athlon_two1 .globl crypto_scalarmult_curve25519_athlon_zero .globl crypto_scalarmult_curve25519_athlon_rounding crypto_scalarmult_curve25519_athlon_scale: .long 0x0,0x30430000 crypto_scalarmult_curve25519_athlon_121665: .long 0x0,0x40fdb410 crypto_scalarmult_curve25519_athlon_in0offset: .long 0x0,0x43300000 crypto_scalarmult_curve25519_athlon_in1offset: .long 0x0,0x45300000 crypto_scalarmult_curve25519_athlon_in2offset: .long 0x0,0x46b00000 crypto_scalarmult_curve25519_athlon_in3offset: .long 0x0,0x48300000 crypto_scalarmult_curve25519_athlon_in4offset: .long 0x0,0x49b00000 crypto_scalarmult_curve25519_athlon_in5offset: .long 0x0,0x4b300000 crypto_scalarmult_curve25519_athlon_in6offset: .long 0x0,0x4d300000 crypto_scalarmult_curve25519_athlon_in7offset: .long 0x0,0x4eb00000 crypto_scalarmult_curve25519_athlon_in8offset: .long 0x0,0x50300000 crypto_scalarmult_curve25519_athlon_in9offset: .long 0x0,0x51b00000 crypto_scalarmult_curve25519_athlon_alpha26: .long 0x0,0x45880000 crypto_scalarmult_curve25519_athlon_alpha51: .long 0x0,0x47180000 crypto_scalarmult_curve25519_athlon_alpha77: .long 0x0,0x48b80000 crypto_scalarmult_curve25519_athlon_alpha102: .long 0x0,0x4a480000 crypto_scalarmult_curve25519_athlon_alpha128: .long 0x0,0x4be80000 crypto_scalarmult_curve25519_athlon_alpha153: .long 0x0,0x4d780000 crypto_scalarmult_curve25519_athlon_alpha179: .long 0x0,0x4f180000 crypto_scalarmult_curve25519_athlon_alpha204: .long 0x0,0x50a80000 crypto_scalarmult_curve25519_athlon_alpha230: .long 0x0,0x52480000 crypto_scalarmult_curve25519_athlon_alpha255: .long 0x0,0x53d80000 crypto_scalarmult_curve25519_athlon_two0: .long 0x0,0x3ff00000 crypto_scalarmult_curve25519_athlon_two1: .long 0x0,0x40000000 crypto_scalarmult_curve25519_athlon_zero: .long 0x0,0x0 
crypto_scalarmult_curve25519_athlon_out0offset: .long 0x1fffffed,0x43380000 crypto_scalarmult_curve25519_athlon_out1offset: .long 0xffffff8,0x44d80000 crypto_scalarmult_curve25519_athlon_out2offset: .long 0x1ffffff8,0x46680000 crypto_scalarmult_curve25519_athlon_out3offset: .long 0xffffff8,0x48080000 crypto_scalarmult_curve25519_athlon_out4offset: .long 0x1ffffff8,0x49980000 crypto_scalarmult_curve25519_athlon_out5offset: .long 0xffffff8,0x4b380000 crypto_scalarmult_curve25519_athlon_out6offset: .long 0x1ffffff8,0x4cc80000 crypto_scalarmult_curve25519_athlon_out7offset: .long 0xffffff8,0x4e680000 crypto_scalarmult_curve25519_athlon_out8offset: .long 0x1ffffff8,0x4ff80000 crypto_scalarmult_curve25519_athlon_out9offset: .long 0x1fffff8,0x51980000 crypto_scalarmult_curve25519_athlon_rounding: .byte 0x7f .byte 0x13 curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/athlon/fromdouble.s000066400000000000000000000074471150631715100271710ustar00rootroot00000000000000.text .p2align 5 .globl _crypto_scalarmult_curve25519_athlon_fromdouble .globl crypto_scalarmult_curve25519_athlon_fromdouble _crypto_scalarmult_curve25519_athlon_fromdouble: crypto_scalarmult_curve25519_athlon_fromdouble: mov %esp,%eax and $31,%eax add $192,%eax sub %eax,%esp movl %ebp,0(%esp) movl 8(%esp,%eax),%ecx fldl 0(%ecx) faddl crypto_scalarmult_curve25519_athlon_out0offset fstpl 96(%esp) fldl 8(%ecx) faddl crypto_scalarmult_curve25519_athlon_out1offset fstpl 104(%esp) fldl 16(%ecx) faddl crypto_scalarmult_curve25519_athlon_out2offset fstpl 112(%esp) fldl 24(%ecx) faddl crypto_scalarmult_curve25519_athlon_out3offset fstpl 120(%esp) fldl 32(%ecx) faddl crypto_scalarmult_curve25519_athlon_out4offset fstpl 128(%esp) fldl 40(%ecx) faddl crypto_scalarmult_curve25519_athlon_out5offset fstpl 136(%esp) fldl 48(%ecx) faddl crypto_scalarmult_curve25519_athlon_out6offset fstpl 144(%esp) fldl 56(%ecx) faddl crypto_scalarmult_curve25519_athlon_out7offset fstpl 152(%esp) fldl 64(%ecx) faddl crypto_scalarmult_curve25519_athlon_out8offset fstpl 160(%esp) fldl 72(%ecx) faddl crypto_scalarmult_curve25519_athlon_out9offset fstpl 168(%esp) movl 96(%esp),%ecx movl %ecx,4(%esp) movl 104(%esp),%ecx shl $26,%ecx movl %ecx,40(%esp) movl 104(%esp),%ecx shr $6,%ecx movl %ecx,8(%esp) movl 112(%esp),%ecx shl $19,%ecx movl %ecx,44(%esp) movl 112(%esp),%ecx shr $13,%ecx movl %ecx,12(%esp) movl 120(%esp),%ecx shl $13,%ecx movl %ecx,48(%esp) movl 120(%esp),%ecx shr $19,%ecx movl %ecx,16(%esp) movl 128(%esp),%ecx shl $6,%ecx movl %ecx,52(%esp) movl 128(%esp),%ecx shr $26,%ecx movl 136(%esp),%edx add %edx,%ecx movl %ecx,20(%esp) movl 144(%esp),%ecx shl $25,%ecx movl %ecx,56(%esp) movl 144(%esp),%ecx shr $7,%ecx movl %ecx,24(%esp) movl 152(%esp),%ecx shl $19,%ecx movl %ecx,60(%esp) movl 152(%esp),%ecx shr $13,%ecx movl %ecx,28(%esp) movl 160(%esp),%ecx shl $12,%ecx movl %ecx,64(%esp) movl 160(%esp),%ecx shr $20,%ecx movl %ecx,32(%esp) movl 168(%esp),%ecx shl $6,%ecx movl %ecx,68(%esp) movl 168(%esp),%ecx shr $26,%ecx movl %ecx,36(%esp) mov $0,%ecx movl %ecx,72(%esp) movl 4(%esp),%ecx addl 40(%esp),%ecx movl %ecx,4(%esp) movl 8(%esp),%ecx adcl 44(%esp),%ecx movl %ecx,8(%esp) movl 12(%esp),%ecx adcl 48(%esp),%ecx movl %ecx,12(%esp) movl 16(%esp),%ecx adcl 52(%esp),%ecx movl %ecx,16(%esp) movl 20(%esp),%ecx adcl 56(%esp),%ecx movl %ecx,20(%esp) movl 24(%esp),%ecx adcl 60(%esp),%ecx movl %ecx,24(%esp) movl 28(%esp),%ecx adcl 64(%esp),%ecx movl %ecx,28(%esp) movl 32(%esp),%ecx adcl 68(%esp),%ecx movl %ecx,32(%esp) movl 36(%esp),%ecx adcl 72(%esp),%ecx movl 
%ecx,36(%esp) movl 4(%esp),%ecx adc $0x13,%ecx movl %ecx,40(%esp) movl 8(%esp),%ecx adc $0,%ecx movl %ecx,44(%esp) movl 12(%esp),%ecx adc $0,%ecx movl %ecx,48(%esp) movl 16(%esp),%ecx adc $0,%ecx movl %ecx,52(%esp) movl 20(%esp),%ecx adc $0,%ecx movl %ecx,56(%esp) movl 24(%esp),%ecx adc $0,%ecx movl %ecx,60(%esp) movl 28(%esp),%ecx adc $0,%ecx movl %ecx,64(%esp) movl 32(%esp),%ecx adc $0x80000000,%ecx movl %ecx,68(%esp) movl 36(%esp),%ebp adc $0xffffffff,%ebp and $0x80000000,%ebp sar $31,%ebp movl 4(%esp,%eax),%ecx movl 4(%esp),%edx xorl 40(%esp),%edx and %ebp,%edx xorl 40(%esp),%edx movl %edx,0(%ecx) movl 8(%esp),%edx xorl 44(%esp),%edx and %ebp,%edx xorl 44(%esp),%edx movl %edx,4(%ecx) movl 12(%esp),%edx xorl 48(%esp),%edx and %ebp,%edx xorl 48(%esp),%edx movl %edx,8(%ecx) movl 16(%esp),%edx xorl 52(%esp),%edx and %ebp,%edx xorl 52(%esp),%edx movl %edx,12(%ecx) movl 20(%esp),%edx xorl 56(%esp),%edx and %ebp,%edx xorl 56(%esp),%edx movl %edx,16(%ecx) movl 24(%esp),%edx xorl 60(%esp),%edx and %ebp,%edx xorl 60(%esp),%edx movl %edx,20(%ecx) movl 28(%esp),%edx xorl 64(%esp),%edx and %ebp,%edx xorl 64(%esp),%edx movl %edx,24(%ecx) movl 32(%esp),%edx xorl 68(%esp),%edx and %ebp,%edx xorl 68(%esp),%edx movl %edx,28(%ecx) movl 0(%esp),%ebp add %eax,%esp ret curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/athlon/init.s000066400000000000000000000005011150631715100257560ustar00rootroot00000000000000.text .p2align 5 .globl _crypto_scalarmult_curve25519_athlon_init .globl crypto_scalarmult_curve25519_athlon_init _crypto_scalarmult_curve25519_athlon_init: crypto_scalarmult_curve25519_athlon_init: mov %esp,%eax and $31,%eax add $0,%eax sub %eax,%esp fldcw crypto_scalarmult_curve25519_athlon_rounding add %eax,%esp ret curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/athlon/mainloop.s000066400000000000000000002236621150631715100266500ustar00rootroot00000000000000.text .p2align 5 .globl _crypto_scalarmult_curve25519_athlon_mainloop .globl crypto_scalarmult_curve25519_athlon_mainloop _crypto_scalarmult_curve25519_athlon_mainloop: crypto_scalarmult_curve25519_athlon_mainloop: mov %esp,%eax and $31,%eax add $704,%eax sub %eax,%esp lea 256(%esp),%edx lea 512(%esp),%ecx fldl crypto_scalarmult_curve25519_athlon_two0 fldl crypto_scalarmult_curve25519_athlon_zero movl %eax,160(%ecx) movl %ebx,164(%ecx) movl %esi,168(%ecx) movl %edi,172(%ecx) movl %ebp,176(%ecx) movl 4(%esp,%eax),%ebx fxch %st(1) fstl 0(%esp) fxch %st(1) fstl 8(%esp) fstl 16(%esp) fstl 24(%esp) fstl 32(%esp) fstl 40(%esp) fstl 48(%esp) fstl -120(%edx) fstl -112(%edx) fstl -104(%edx) fstl -96(%edx) fstl -88(%edx) fstl -80(%edx) fstl -72(%edx) fstl -64(%edx) fstl -56(%edx) fstl -48(%edx) fstl -40(%edx) fstl -32(%edx) fstl -24(%edx) fxch %st(1) fstpl 64(%edx) fstl 72(%edx) fstl 80(%edx) fstl 88(%edx) fstl 96(%edx) fstl 104(%edx) fstl 112(%edx) fstl 120(%edx) fstl -128(%ecx) fstpl -120(%ecx) fldl 0(%ebx) fldl 8(%ebx) fldl 16(%ebx) fldl 24(%ebx) fxch %st(3) fstl -16(%edx) fstpl 56(%esp) fldl 32(%ebx) fxch %st(2) fstl -8(%edx) fstpl 64(%esp) fldl 40(%ebx) fxch %st(1) fstl 0(%edx) fstpl 72(%esp) fldl 48(%ebx) fxch %st(3) fstl 8(%edx) fstpl 80(%esp) fldl 56(%ebx) fxch %st(2) fstl 16(%edx) fstpl 88(%esp) fldl 64(%ebx) fxch %st(1) fstl 24(%edx) fstpl 96(%esp) fldl 72(%ebx) fxch %st(3) fstl 32(%edx) fstpl 104(%esp) fxch %st(1) fstl 40(%edx) fstpl 112(%esp) fstl 48(%edx) fstpl 120(%esp) fstl 56(%edx) fstpl -128(%edx) movl 8(%esp,%eax),%ebx mov $28,%edi mov $31,%ebp movl 28(%ebx),%esi rol $1,%esi ._morebytes: movl %edi,188(%ecx) ._morebits: 
rol $1,%esi movl %esi,180(%ecx) movl %ebp,184(%ecx) and $1,%esi movl $0x43300000,-108(%ecx) movl %esi,-112(%ecx) fldl -96(%edx) fldl 0(%esp) fadd %st(0),%st(1) fsubl -96(%edx) fldl 64(%edx) fldl -16(%edx) fadd %st(0),%st(1) fsubl 64(%edx) fldl -88(%edx) fldl 8(%esp) fadd %st(0),%st(1) fsubl -88(%edx) fxch %st(5) fstpl 0(%esp) fxch %st(3) fstpl -96(%edx) fldl 72(%edx) fldl -8(%edx) fadd %st(0),%st(1) fsubl 72(%edx) fxch %st(3) fstpl -16(%edx) fxch %st(1) fstpl 64(%edx) fldl -80(%edx) fldl 16(%esp) fadd %st(0),%st(1) fsubl -80(%edx) fxch %st(4) fstpl 8(%esp) fxch %st(4) fstpl -88(%edx) fldl 80(%edx) fldl 0(%edx) fadd %st(0),%st(1) fsubl 80(%edx) fxch %st(2) fstpl -8(%edx) fxch %st(2) fstpl 72(%edx) fldl -72(%edx) fldl 24(%esp) fadd %st(0),%st(1) fsubl -72(%edx) fxch %st(5) fstpl 16(%esp) fxch %st(3) fstpl -80(%edx) fldl 88(%edx) fldl 8(%edx) fadd %st(0),%st(1) fsubl 88(%edx) fxch %st(3) fstpl 0(%edx) fxch %st(1) fstpl 80(%edx) fldl -64(%edx) fldl 32(%esp) fadd %st(0),%st(1) fsubl -64(%edx) fxch %st(4) fstpl 24(%esp) fxch %st(4) fstpl -72(%edx) fldl 96(%edx) fldl 16(%edx) fadd %st(0),%st(1) fsubl 96(%edx) fxch %st(2) fstpl 8(%edx) fxch %st(2) fstpl 88(%edx) fldl -56(%edx) fldl 40(%esp) fadd %st(0),%st(1) fsubl -56(%edx) fxch %st(5) fstpl 32(%esp) fxch %st(3) fstpl -64(%edx) fldl 104(%edx) fldl 24(%edx) fadd %st(0),%st(1) fsubl 104(%edx) fxch %st(3) fstpl 16(%edx) fxch %st(1) fstpl 96(%edx) fldl -48(%edx) fldl 48(%esp) fadd %st(0),%st(1) fsubl -48(%edx) fxch %st(4) fstpl 40(%esp) fxch %st(4) fstpl -56(%edx) fldl 112(%edx) fldl 32(%edx) fadd %st(0),%st(1) fsubl 112(%edx) fxch %st(2) fstpl 24(%edx) fxch %st(2) fstpl 104(%edx) fldl -40(%edx) fldl -120(%edx) fadd %st(0),%st(1) fsubl -40(%edx) fxch %st(5) fstpl 48(%esp) fxch %st(3) fstpl -48(%edx) fldl 120(%edx) fldl 40(%edx) fadd %st(0),%st(1) fsubl 120(%edx) fxch %st(3) fstpl 32(%edx) fxch %st(1) fstpl 112(%edx) fldl -32(%edx) fldl -112(%edx) fadd %st(0),%st(1) fsubl -32(%edx) fxch %st(4) fstpl -120(%edx) fxch %st(4) fstpl -40(%edx) fldl -128(%ecx) fldl 48(%edx) fadd %st(0),%st(1) fsubl -128(%ecx) fxch %st(2) fstpl 40(%edx) fxch %st(2) fstpl 120(%edx) fldl -24(%edx) fldl -104(%edx) fadd %st(0),%st(1) fsubl -24(%edx) fxch %st(5) fstpl -112(%edx) fxch %st(3) fstpl -32(%edx) fldl -120(%ecx) fldl 56(%edx) fadd %st(0),%st(1) fsubl -120(%ecx) fxch %st(3) fstpl 48(%edx) fxch %st(1) fstpl -128(%ecx) fldl -112(%ecx) fsubl crypto_scalarmult_curve25519_athlon_in0offset fldl crypto_scalarmult_curve25519_athlon_two0 fsub %st(1),%st(0) fxch %st(4) fstpl -104(%edx) fxch %st(4) fstpl -24(%edx) fstpl 56(%edx) fstpl -120(%ecx) fxch %st(1) fstl 136(%ecx) fldl 0(%esp) fmul %st(2),%st(0) fldl -16(%edx) fmul %st(2),%st(0) faddp %st(0),%st(1) fldl 8(%esp) fmul %st(3),%st(0) fldl -8(%edx) fmul %st(3),%st(0) faddp %st(0),%st(1) fldl 16(%esp) fmul %st(4),%st(0) fldl 0(%edx) fmul %st(4),%st(0) faddp %st(0),%st(1) fldl 24(%esp) fmul %st(5),%st(0) fldl 8(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(3) fstpl -112(%ecx) fldl 32(%esp) fmul %st(5),%st(0) fldl 16(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(2) fstpl -104(%ecx) fldl 40(%esp) fmul %st(5),%st(0) fldl 24(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(1) fstpl -96(%ecx) fldl 48(%esp) fmul %st(5),%st(0) fldl 32(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(3) fstpl -88(%ecx) fldl -120(%edx) fmul %st(5),%st(0) fldl 40(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(2) fstpl -80(%ecx) fldl -112(%edx) fmul %st(5),%st(0) fldl 48(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(1) 
fstpl -72(%ecx) fldl -104(%edx) fmul %st(5),%st(0) fldl 56(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(3) fstpl -64(%ecx) fldl -96(%edx) fmul %st(5),%st(0) fldl 64(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(2) fstpl -56(%ecx) fldl -88(%edx) fmul %st(5),%st(0) fldl 72(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(1) fstpl -48(%ecx) fldl -80(%edx) fmul %st(5),%st(0) fldl 80(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(3) fstpl -40(%ecx) fldl -72(%edx) fmul %st(5),%st(0) fldl 88(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(2) fstpl -32(%ecx) fldl -64(%edx) fmul %st(5),%st(0) fldl 96(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(1) fstpl -24(%ecx) fldl -56(%edx) fmul %st(5),%st(0) fldl 104(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(3) fstpl -16(%ecx) fldl -48(%edx) fmul %st(5),%st(0) fldl 112(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(2) fstpl -8(%ecx) fldl -40(%edx) fmul %st(5),%st(0) fldl 120(%edx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(1) fstpl 0(%ecx) fldl -32(%edx) fmul %st(5),%st(0) fldl -128(%ecx) fmul %st(5),%st(0) faddp %st(0),%st(1) fxch %st(3) fstpl 8(%ecx) fldl -24(%edx) fmulp %st(0),%st(5) fldl -120(%ecx) fmulp %st(0),%st(4) fxch %st(3) faddp %st(0),%st(4) fstpl 16(%ecx) fxch %st(1) fstpl 24(%ecx) fstpl 32(%ecx) fstpl 40(%ecx) fldl -24(%edx) fmull 56(%edx) fmull crypto_scalarmult_curve25519_athlon_scale fldl -96(%edx) fmull 48(%edx) faddp %st(0),%st(1) fldl -88(%edx) fmull 40(%edx) faddp %st(0),%st(1) fldl -96(%edx) fmull 56(%edx) fldl -80(%edx) fmull 32(%edx) faddp %st(0),%st(2) fldl -88(%edx) fmull 48(%edx) faddp %st(0),%st(1) fldl -72(%edx) fmull 24(%edx) faddp %st(0),%st(2) fldl -80(%edx) fmull 40(%edx) faddp %st(0),%st(1) fldl -64(%edx) fmull 16(%edx) faddp %st(0),%st(2) fldl -72(%edx) fmull 32(%edx) faddp %st(0),%st(1) fldl -88(%edx) fmull 56(%edx) fldl -56(%edx) fmull 8(%edx) faddp %st(0),%st(3) fldl -64(%edx) fmull 24(%edx) faddp %st(0),%st(2) fldl -80(%edx) fmull 48(%edx) faddp %st(0),%st(1) fldl -48(%edx) fmull 0(%edx) faddp %st(0),%st(3) fldl -56(%edx) fmull 16(%edx) faddp %st(0),%st(2) fldl -72(%edx) fmull 40(%edx) faddp %st(0),%st(1) fldl -40(%edx) fmull -8(%edx) faddp %st(0),%st(3) fldl -48(%edx) fmull 8(%edx) faddp %st(0),%st(2) fldl -64(%edx) fmull 32(%edx) faddp %st(0),%st(1) fldl -32(%edx) fmull -16(%edx) faddp %st(0),%st(3) fldl -40(%edx) fmull 0(%edx) faddp %st(0),%st(2) fldl -56(%edx) fmull 24(%edx) faddp %st(0),%st(1) fldl -80(%edx) fmull 56(%edx) fldl -48(%edx) fmull 16(%edx) faddp %st(0),%st(2) fldl -32(%edx) fmull -8(%edx) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(4),%st(0) fldl -72(%edx) fmull 48(%edx) faddp %st(0),%st(2) fldl -40(%edx) fmull 8(%edx) faddp %st(0),%st(3) fldl -24(%edx) fmull -16(%edx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fldl -64(%edx) fmull 40(%edx) faddp %st(0),%st(2) fldl -72(%edx) fmull 56(%edx) fldl -32(%edx) fmull 0(%edx) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fldl -56(%edx) fmull 32(%edx) faddp %st(0),%st(3) fldl -64(%edx) fmull 48(%edx) faddp %st(0),%st(2) fsubrp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(4),%st(0) fldl -48(%edx) fmull 24(%edx) faddp %st(0),%st(3) fldl -56(%edx) fmull 40(%edx) faddp %st(0),%st(2) fldl -24(%edx) fmull -8(%edx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fldl -40(%edx) fmull 16(%edx) faddp %st(0),%st(3) fldl -64(%edx) fmull 56(%edx) fldl -48(%edx) fmull 32(%edx) faddp 
%st(0),%st(3) fldl -32(%edx) fmull 8(%edx) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fsubrp %st(0),%st(5) fxch %st(5) fstpl 64(%ecx) fldl -56(%edx) fmull 48(%edx) faddp %st(0),%st(5) fldl -40(%edx) fmull 24(%edx) faddp %st(0),%st(1) fldl -24(%edx) fmull 0(%edx) faddp %st(0),%st(2) fxch %st(2) fmull crypto_scalarmult_curve25519_athlon_scale fldl -48(%edx) fmull 40(%edx) faddp %st(0),%st(5) fldl -32(%edx) fmull 16(%edx) faddp %st(0),%st(3) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl -96(%edx) fmull -16(%edx) faddp %st(0),%st(2) fxch %st(3) fstpl 72(%ecx) fldl -56(%edx) fmull 56(%edx) fldl -40(%edx) fmull 32(%edx) faddp %st(0),%st(5) fldl -24(%edx) fmull 8(%edx) faddp %st(0),%st(3) fldl -96(%edx) fmull -8(%edx) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(2),%st(0) fldl -48(%edx) fmull 48(%edx) faddp %st(0),%st(2) fldl -32(%edx) fmull 24(%edx) faddp %st(0),%st(6) fxch %st(3) fmull crypto_scalarmult_curve25519_athlon_scale fldl -88(%edx) fmull -16(%edx) faddp %st(0),%st(5) fxch %st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fldl -40(%edx) fmull 40(%edx) faddp %st(0),%st(2) fldl -24(%edx) fmull 16(%edx) faddp %st(0),%st(6) fldl -96(%edx) fmull 0(%edx) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(2) fldl -48(%edx) fmull 56(%edx) fldl -32(%edx) fmull 32(%edx) faddp %st(0),%st(2) fxch %st(5) fmull crypto_scalarmult_curve25519_athlon_scale fldl -88(%edx) fmull -8(%edx) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(5),%st(0) fldl -40(%edx) fmull 48(%edx) faddp %st(0),%st(7) fldl -24(%edx) fmull 24(%edx) faddp %st(0),%st(3) fldl -96(%edx) fmull 8(%edx) faddp %st(0),%st(2) fldl -80(%edx) fmull -16(%edx) faddp %st(0),%st(5) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fxch %st(3) fstpl 48(%ecx) fldl -32(%edx) fmull 40(%edx) faddp %st(0),%st(6) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl -88(%edx) fmull 0(%edx) faddp %st(0),%st(2) fxch %st(2) fadd %st(0),%st(3) fsubrp %st(0),%st(4) fldl -40(%edx) fmull 56(%edx) fldl -24(%edx) fmull 32(%edx) faddp %st(0),%st(6) fldl -96(%edx) fmull 16(%edx) faddp %st(0),%st(3) fldl -80(%edx) fmull -8(%edx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(4),%st(0) fldl -32(%edx) fmull 48(%edx) faddp %st(0),%st(2) fxch %st(6) fmull crypto_scalarmult_curve25519_athlon_scale fldl -88(%edx) fmull 8(%edx) faddp %st(0),%st(4) fldl -72(%edx) fmull -16(%edx) faddp %st(0),%st(3) fxch %st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fxch %st(5) fstpl 56(%ecx) fldl -24(%edx) fmull 40(%edx) faddp %st(0),%st(1) fldl -96(%edx) fmull 24(%edx) faddp %st(0),%st(6) fldl -80(%edx) fmull 0(%edx) faddp %st(0),%st(3) fxch %st(4) fadd %st(0),%st(1) fsubrp %st(0),%st(3) fldl -32(%edx) fmull 56(%edx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl -88(%edx) fmull 16(%edx) faddp %st(0),%st(6) fldl -72(%edx) fmull -8(%edx) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(2),%st(0) fldl -24(%edx) fmull 48(%edx) faddp %st(0),%st(6) fldl -96(%edx) fmull 32(%edx) faddp %st(0),%st(2) fldl -80(%edx) fmull 8(%edx) faddp %st(0),%st(7) fldl -64(%edx) fmull -16(%edx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fxch %st(4) fstpl -24(%edx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl -88(%edx) fmull 24(%edx) faddp %st(0),%st(5) fldl -72(%edx) fmull 0(%edx) faddp %st(0),%st(6) fxch %st(3) fadd %st(0),%st(2) fsubrp %st(0),%st(1) fldl 
-96(%edx) fmull 40(%edx) faddp %st(0),%st(3) fldl -80(%edx) fmull 16(%edx) faddp %st(0),%st(4) fldl -64(%edx) fmull -8(%edx) faddp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(2),%st(0) fldl -88(%edx) fmull 32(%edx) faddp %st(0),%st(4) fldl -72(%edx) fmull 8(%edx) faddp %st(0),%st(5) fldl -56(%edx) fmull -16(%edx) faddp %st(0),%st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fxch %st(1) fstpl -96(%edx) fldl -80(%edx) fmull 24(%edx) faddp %st(0),%st(3) fldl -64(%edx) fmull 0(%edx) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(1) fstpl -88(%edx) fldl -72(%edx) fmull 16(%edx) faddp %st(0),%st(1) fldl -56(%edx) fmull -8(%edx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(3),%st(0) fldl -64(%edx) fmull 8(%edx) faddp %st(0),%st(2) fldl -48(%edx) fmull -16(%edx) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fldl -56(%edx) fmull 0(%edx) faddp %st(0),%st(2) fadd %st(0),%st(2) fsubrp %st(0),%st(3) fxch %st(2) fstpl -80(%edx) fldl -48(%edx) fmull -8(%edx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fldl -40(%edx) fmull -16(%edx) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fldl 64(%ecx) fldl 72(%ecx) fxch %st(2) fadd %st(0),%st(4) fsubrp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(4),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(0),%st(1) fsubrp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) faddp %st(0),%st(2) fxch %st(2) fstpl -72(%edx) fxch %st(2) fstpl -64(%edx) fstpl -56(%edx) fstpl -48(%edx) fldl -104(%edx) fmull -120(%ecx) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -128(%ecx) faddp %st(0),%st(1) fldl 8(%esp) fmull 120(%edx) faddp %st(0),%st(1) fldl 0(%esp) fmull -120(%ecx) fldl 16(%esp) fmull 112(%edx) faddp %st(0),%st(2) fldl 8(%esp) fmull -128(%ecx) faddp %st(0),%st(1) fldl 24(%esp) fmull 104(%edx) faddp %st(0),%st(2) fldl 16(%esp) fmull 120(%edx) faddp %st(0),%st(1) fldl 32(%esp) fmull 96(%edx) faddp %st(0),%st(2) fldl 24(%esp) fmull 112(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull -120(%ecx) fldl 40(%esp) fmull 88(%edx) faddp %st(0),%st(3) fldl 32(%esp) fmull 104(%edx) faddp %st(0),%st(2) fldl 16(%esp) fmull -128(%ecx) faddp %st(0),%st(1) fldl 48(%esp) fmull 80(%edx) faddp %st(0),%st(3) fldl 40(%esp) fmull 96(%edx) faddp %st(0),%st(2) fldl 24(%esp) fmull 120(%edx) faddp %st(0),%st(1) fldl -120(%edx) fmull 72(%edx) faddp %st(0),%st(3) fldl 48(%esp) fmull 88(%edx) faddp %st(0),%st(2) fldl 32(%esp) fmull 112(%edx) faddp %st(0),%st(1) fldl -112(%edx) fmull 64(%edx) faddp %st(0),%st(3) fldl -120(%edx) fmull 80(%edx) faddp %st(0),%st(2) fldl 40(%esp) fmull 104(%edx) faddp %st(0),%st(1) fldl 16(%esp) fmull -120(%ecx) fldl 48(%esp) fmull 96(%edx) faddp %st(0),%st(2) fldl -112(%edx) fmull 72(%edx) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(4),%st(0) fldl 24(%esp) fmull -128(%ecx) faddp %st(0),%st(2) fldl -120(%edx) fmull 88(%edx) faddp %st(0),%st(3) fldl -104(%edx) fmull 64(%edx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fldl 32(%esp) fmull 120(%edx) faddp %st(0),%st(2) fldl 24(%esp) fmull -120(%ecx) fldl -112(%edx) fmull 80(%edx) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fldl 40(%esp) fmull 112(%edx) faddp %st(0),%st(3) fldl 32(%esp) fmull -128(%ecx) faddp %st(0),%st(2) 
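# (hedged) The long fldl/fmull/faddp runs here accumulate partial products of a 10-limb
# field multiplication on the FPU stack. The recurring fadd/fsubl pairs with the
# ..._alpha26 .. ..._alpha255 constants appear to round each limb to its boundary
# (limb boundaries at bits 26, 51, 77, 102, 128, 153, 179, 204, 230, 255, i.e. a
# radix-2^25.5 representation of GF(2^255 - 19)), propagating carries, while
# fmull ..._scale appears to fold the top limbs back in modulo 2^255 - 19.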
fsubrp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(4),%st(0) fldl 48(%esp) fmull 104(%edx) faddp %st(0),%st(3) fldl 40(%esp) fmull 120(%edx) faddp %st(0),%st(2) fldl -104(%edx) fmull 72(%edx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fldl -120(%edx) fmull 96(%edx) faddp %st(0),%st(3) fldl 32(%esp) fmull -120(%ecx) fldl 48(%esp) fmull 112(%edx) faddp %st(0),%st(3) fldl -112(%edx) fmull 88(%edx) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fsubrp %st(0),%st(5) fxch %st(5) fstpl 8(%edx) fldl 40(%esp) fmull -128(%ecx) faddp %st(0),%st(5) fldl -120(%edx) fmull 104(%edx) faddp %st(0),%st(1) fldl -104(%edx) fmull 80(%edx) faddp %st(0),%st(2) fxch %st(2) fmull crypto_scalarmult_curve25519_athlon_scale fldl 48(%esp) fmull 120(%edx) faddp %st(0),%st(5) fldl -112(%edx) fmull 96(%edx) faddp %st(0),%st(3) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 64(%edx) faddp %st(0),%st(2) fxch %st(3) fstpl 16(%edx) fldl 40(%esp) fmull -120(%ecx) fldl -120(%edx) fmull 112(%edx) faddp %st(0),%st(5) fldl -104(%edx) fmull 88(%edx) faddp %st(0),%st(3) fldl 0(%esp) fmull 72(%edx) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(2),%st(0) fldl 48(%esp) fmull -128(%ecx) faddp %st(0),%st(2) fldl -112(%edx) fmull 104(%edx) faddp %st(0),%st(6) fxch %st(3) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%esp) fmull 64(%edx) faddp %st(0),%st(5) fxch %st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fldl -120(%edx) fmull 120(%edx) faddp %st(0),%st(2) fldl -104(%edx) fmull 96(%edx) faddp %st(0),%st(6) fldl 0(%esp) fmull 80(%edx) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(2) fldl 48(%esp) fmull -120(%ecx) fldl -112(%edx) fmull 112(%edx) faddp %st(0),%st(2) fxch %st(5) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%esp) fmull 72(%edx) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(5),%st(0) fldl -120(%edx) fmull -128(%ecx) faddp %st(0),%st(7) fldl -104(%edx) fmull 104(%edx) faddp %st(0),%st(3) fldl 0(%esp) fmull 88(%edx) faddp %st(0),%st(2) fldl 16(%esp) fmull 64(%edx) faddp %st(0),%st(5) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fxch %st(3) fstpl -40(%edx) fldl -112(%edx) fmull 120(%edx) faddp %st(0),%st(6) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%esp) fmull 80(%edx) faddp %st(0),%st(2) fxch %st(2) fadd %st(0),%st(3) fsubrp %st(0),%st(4) fldl -120(%edx) fmull -120(%ecx) fldl -104(%edx) fmull 112(%edx) faddp %st(0),%st(6) fldl 0(%esp) fmull 96(%edx) faddp %st(0),%st(3) fldl 16(%esp) fmull 72(%edx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(4),%st(0) fldl -112(%edx) fmull -128(%ecx) faddp %st(0),%st(2) fxch %st(6) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%esp) fmull 88(%edx) faddp %st(0),%st(4) fldl 24(%esp) fmull 64(%edx) faddp %st(0),%st(3) fxch %st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fxch %st(5) fstpl -32(%edx) fldl -104(%edx) fmull 120(%edx) faddp %st(0),%st(1) fldl 0(%esp) fmull 104(%edx) faddp %st(0),%st(6) fldl 16(%esp) fmull 80(%edx) faddp %st(0),%st(3) fxch %st(4) fadd %st(0),%st(1) fsubrp %st(0),%st(3) fldl -112(%edx) fmull -120(%ecx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%esp) fmull 96(%edx) faddp %st(0),%st(6) fldl 24(%esp) fmull 72(%edx) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(2),%st(0) fldl -104(%edx) fmull -128(%ecx) faddp %st(0),%st(6) fldl 0(%esp) fmull 
112(%edx) faddp %st(0),%st(2) fldl 16(%esp) fmull 88(%edx) faddp %st(0),%st(7) fldl 32(%esp) fmull 64(%edx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fxch %st(4) fstpl -104(%edx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%esp) fmull 104(%edx) faddp %st(0),%st(5) fldl 24(%esp) fmull 80(%edx) faddp %st(0),%st(6) fxch %st(3) fadd %st(0),%st(2) fsubrp %st(0),%st(1) fldl 0(%esp) fmull 120(%edx) faddp %st(0),%st(3) fldl 16(%esp) fmull 96(%edx) faddp %st(0),%st(4) fldl 32(%esp) fmull 72(%edx) faddp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(2),%st(0) fldl 8(%esp) fmull 112(%edx) faddp %st(0),%st(4) fldl 24(%esp) fmull 88(%edx) faddp %st(0),%st(5) fldl 40(%esp) fmull 64(%edx) faddp %st(0),%st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fxch %st(1) fstpl -16(%edx) fldl 16(%esp) fmull 104(%edx) faddp %st(0),%st(3) fldl 32(%esp) fmull 80(%edx) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(1) fstpl -8(%edx) fldl 24(%esp) fmull 96(%edx) faddp %st(0),%st(1) fldl 40(%esp) fmull 72(%edx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(3),%st(0) fldl 32(%esp) fmull 88(%edx) faddp %st(0),%st(2) fldl 48(%esp) fmull 64(%edx) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fldl 40(%esp) fmull 80(%edx) faddp %st(0),%st(2) fadd %st(0),%st(2) fsubrp %st(0),%st(3) fxch %st(2) fstpl 0(%edx) fldl 48(%esp) fmull 72(%edx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fldl -120(%edx) fmull 64(%edx) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fldl 8(%edx) fldl 16(%edx) fxch %st(2) fadd %st(0),%st(4) fsubrp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(4),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(0),%st(1) fsubrp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) faddp %st(0),%st(2) fxch %st(2) fstpl 8(%edx) fxch %st(2) fstpl 16(%edx) fstpl 24(%edx) fstpl 32(%edx) fldl -40(%ecx) fmul %st(0),%st(0) fldl -112(%ecx) fadd %st(0),%st(0) fldl -104(%ecx) fadd %st(0),%st(0) fldl -96(%ecx) fadd %st(0),%st(0) fldl -56(%ecx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl -40(%ecx) fmul %st(4),%st(0) fldl -48(%ecx) fmul %st(4),%st(0) faddp %st(0),%st(1) fxch %st(4) fstl 0(%esp) fxch %st(3) fstl 8(%esp) fxch %st(3) fmull -48(%ecx) faddp %st(0),%st(1) fldl -64(%ecx) fxch %st(5) fmul %st(0),%st(3) fxch %st(3) faddp %st(0),%st(1) fxch %st(2) fadd %st(0),%st(0) fldl -56(%ecx) fmul %st(2),%st(0) faddp %st(0),%st(4) fxch %st(1) fstl 16(%esp) fldl -72(%ecx) fxch %st(5) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(3) fadd %st(0),%st(0) fstpl 48(%esp) fldl -88(%ecx) fadd %st(0),%st(0) fstl 24(%esp) fldl -64(%ecx) fmul %st(1),%st(0) faddp %st(0),%st(4) fmul %st(4),%st(0) faddp %st(0),%st(2) fxch %st(3) fadd %st(0),%st(0) fstpl 40(%esp) fldl -80(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) fldl 8(%esp) fldl -40(%ecx) fmul %st(0),%st(1) fldl 16(%esp) fmul %st(0),%st(1) fldl -48(%ecx) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(3) fldl 24(%esp) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(2) fldl -80(%ecx) fadd %st(0),%st(0) fstl 32(%esp) fmull -72(%ecx) faddp %st(0),%st(6) fxch %st(3) faddp %st(0),%st(5) 
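# (assumption) The fmul %st(0),%st(0) self-products and pre-doubled cross terms in the
# block that follows suggest a dedicated field squaring rather than a general multiplication.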
fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(5),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fsubr %st(0),%st(5) fldl -56(%ecx) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(3) fldl 32(%esp) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(2) fldl -64(%ecx) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(3) fxch %st(3) fmull 40(%esp) faddp %st(0),%st(1) fxch %st(3) fstpl -120(%edx) fldl -72(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) fxch %st(2) fmull crypto_scalarmult_curve25519_athlon_scale fxch %st(3) fstpl -112(%edx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 24(%esp) fmull -40(%ecx) fldl -112(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(2) fldl 32(%esp) fmull -48(%ecx) faddp %st(0),%st(1) fldl 0(%esp) fmull -104(%ecx) faddp %st(0),%st(3) fldl 40(%esp) fmull -56(%ecx) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(2),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fsubr %st(0),%st(2) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(3),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fsubr %st(0),%st(3) fldl -64(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(2) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -96(%ecx) faddp %st(0),%st(1) fldl -104(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fsubr %st(0),%st(1) fxch %st(2) fstpl 64(%edx) fldl 32(%esp) fmull -40(%ecx) fldl 40(%esp) fmull -48(%ecx) faddp %st(0),%st(1) fldl 48(%esp) fmull -56(%ecx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -88(%ecx) faddp %st(0),%st(1) fldl 8(%esp) fmull -96(%ecx) faddp %st(0),%st(1) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(2),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fsubr %st(0),%st(2) fxch %st(3) fstpl 72(%edx) fldl 40(%esp) fmull -40(%ecx) fldl 48(%esp) fmull -48(%ecx) faddp %st(0),%st(1) fldl -56(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -80(%ecx) faddp %st(0),%st(1) fldl 8(%esp) fmull -88(%ecx) faddp %st(0),%st(1) fldl -96(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(3),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fsubr %st(0),%st(3) fxch %st(1) fstpl 80(%edx) fldl 48(%esp) fldl -40(%ecx) fmul %st(0),%st(1) fmul %st(5),%st(0) fxch %st(5) fmull -48(%ecx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -72(%ecx) faddp %st(0),%st(1) fldl 8(%esp) fmull -80(%ecx) faddp %st(0),%st(1) fldl 16(%esp) fmull -88(%ecx) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fsubr %st(0),%st(1) fxch %st(2) fstpl 88(%edx) fldl -48(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(4) fxch %st(3) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -64(%ecx) faddp %st(0),%st(1) fldl 8(%esp) fmull -72(%ecx) faddp %st(0),%st(1) fldl 16(%esp) fmull -80(%ecx) faddp %st(0),%st(1) fldl -88(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fsubr %st(0),%st(1) fldl -48(%ecx) fadd %st(0),%st(0) 
fmull -40(%ecx) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -56(%ecx) faddp %st(0),%st(1) fldl 8(%esp) fmull -64(%ecx) faddp %st(0),%st(1) fldl 16(%esp) fmull -72(%ecx) faddp %st(0),%st(1) fldl 24(%esp) fmull -80(%ecx) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fsubr %st(0),%st(1) fldl -120(%edx) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fldl -112(%edx) fxch %st(1) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(2) faddp %st(0),%st(1) fxch %st(4) fstpl 96(%edx) fxch %st(4) fstpl 104(%edx) fxch %st(1) fstpl 112(%edx) fstpl 120(%edx) fxch %st(1) fstpl -128(%ecx) fstpl -120(%ecx) fldl 40(%ecx) fmul %st(0),%st(0) fldl -32(%ecx) fadd %st(0),%st(0) fldl -24(%ecx) fadd %st(0),%st(0) fldl -16(%ecx) fadd %st(0),%st(0) fldl 24(%ecx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl 40(%ecx) fmul %st(4),%st(0) fldl 32(%ecx) fmul %st(4),%st(0) faddp %st(0),%st(1) fxch %st(4) fstl 0(%esp) fxch %st(3) fstl 8(%esp) fxch %st(3) fmull 32(%ecx) faddp %st(0),%st(1) fldl 16(%ecx) fxch %st(5) fmul %st(0),%st(3) fxch %st(3) faddp %st(0),%st(1) fxch %st(2) fadd %st(0),%st(0) fldl 24(%ecx) fmul %st(2),%st(0) faddp %st(0),%st(4) fxch %st(1) fstl 16(%esp) fldl 8(%ecx) fxch %st(5) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(3) fadd %st(0),%st(0) fstpl 48(%esp) fldl -8(%ecx) fadd %st(0),%st(0) fstl 24(%esp) fldl 16(%ecx) fmul %st(1),%st(0) faddp %st(0),%st(4) fmul %st(4),%st(0) faddp %st(0),%st(2) fxch %st(3) fadd %st(0),%st(0) fstpl 40(%esp) fldl 0(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) fldl 8(%esp) fldl 40(%ecx) fmul %st(0),%st(1) fldl 16(%esp) fmul %st(0),%st(1) fldl 32(%ecx) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(3) fldl 24(%esp) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(2) fldl 0(%ecx) fadd %st(0),%st(0) fstl 32(%esp) fmull 8(%ecx) faddp %st(0),%st(6) fxch %st(3) faddp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(5),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fsubr %st(0),%st(5) fldl 24(%ecx) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(3) fldl 32(%esp) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(2) fldl 16(%ecx) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(3) fxch %st(3) fmull 40(%esp) faddp %st(0),%st(1) fxch %st(3) fstpl -120(%edx) fldl 8(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) fxch %st(2) fmull crypto_scalarmult_curve25519_athlon_scale fxch %st(3) fstpl -112(%edx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 24(%esp) fmull 40(%ecx) fldl -32(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(2) fldl 32(%esp) fmull 32(%ecx) faddp %st(0),%st(1) fldl 0(%esp) fmull -24(%ecx) faddp %st(0),%st(3) fldl 40(%esp) fmull 24(%ecx) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(2),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fsubr %st(0),%st(2) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(3),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fsubr %st(0),%st(3) fldl 16(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(2) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -16(%ecx) faddp %st(0),%st(1) fldl -24(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp 
%st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fsubr %st(0),%st(1) fxch %st(2) fstpl -112(%ecx) fldl 32(%esp) fmull 40(%ecx) fldl 40(%esp) fmull 32(%ecx) faddp %st(0),%st(1) fldl 48(%esp) fmull 24(%ecx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -8(%ecx) faddp %st(0),%st(1) fldl 8(%esp) fmull -16(%ecx) faddp %st(0),%st(1) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(2),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fsubr %st(0),%st(2) fxch %st(3) fstpl -104(%ecx) fldl 40(%esp) fmull 40(%ecx) fldl 48(%esp) fmull 32(%ecx) faddp %st(0),%st(1) fldl 24(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 0(%ecx) faddp %st(0),%st(1) fldl 8(%esp) fmull -8(%ecx) faddp %st(0),%st(1) fldl -16(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(3),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fsubr %st(0),%st(3) fxch %st(1) fstpl -96(%ecx) fldl 48(%esp) fldl 40(%ecx) fmul %st(0),%st(1) fmul %st(5),%st(0) fxch %st(5) fmull 32(%ecx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 8(%ecx) faddp %st(0),%st(1) fldl 8(%esp) fmull 0(%ecx) faddp %st(0),%st(1) fldl 16(%esp) fmull -8(%ecx) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fsubr %st(0),%st(1) fxch %st(2) fstpl -88(%ecx) fldl 32(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(4) fxch %st(3) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 16(%ecx) faddp %st(0),%st(1) fldl 8(%esp) fmull 8(%ecx) faddp %st(0),%st(1) fldl 16(%esp) fmull 0(%ecx) faddp %st(0),%st(1) fldl -8(%ecx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fsubr %st(0),%st(1) fldl 32(%ecx) fadd %st(0),%st(0) fmull 40(%ecx) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 24(%ecx) faddp %st(0),%st(1) fldl 8(%esp) fmull 16(%ecx) faddp %st(0),%st(1) fldl 16(%esp) fmull 8(%ecx) faddp %st(0),%st(1) fldl 24(%esp) fmull 0(%ecx) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fsubr %st(0),%st(1) fldl -120(%edx) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fldl -112(%edx) fxch %st(1) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(2) faddp %st(0),%st(1) fxch %st(4) fstpl -80(%ecx) fxch %st(4) fstpl -72(%ecx) fxch %st(1) fstpl -64(%ecx) fstpl -56(%ecx) fxch %st(1) fstpl -48(%ecx) fstpl -40(%ecx) fldl -40(%edx) fldl 48(%ecx) fadd %st(0),%st(1) fsubl -40(%edx) fxch %st(1) fstpl -120(%edx) fstpl -40(%edx) fldl -32(%edx) fldl 56(%ecx) fadd %st(0),%st(1) fsubl -32(%edx) fxch %st(1) fstpl -112(%edx) fstpl -32(%edx) fldl -104(%edx) fldl -24(%edx) fadd %st(0),%st(1) fsubl -104(%edx) fxch %st(1) fstpl -104(%edx) fstpl -24(%edx) fldl -16(%edx) fldl -96(%edx) fadd %st(0),%st(1) fsubl -16(%edx) fxch %st(1) fstpl -96(%edx) fstpl -16(%edx) fldl -8(%edx) fldl -88(%edx) fadd %st(0),%st(1) fsubl -8(%edx) fxch %st(1) fstpl -88(%edx) fstpl -8(%edx) fldl 0(%edx) fldl -80(%edx) fadd %st(0),%st(1) fsubl 0(%edx) fxch %st(1) 
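# (hedged) The fadd/fsub pairs in this region form limb-wise sums and differences of the
# working coordinates, matching the (x+z, x-z) values a Montgomery-ladder
# add/double step consumes.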
fstpl -80(%edx) fstpl 0(%edx) fldl 8(%edx) fldl -72(%edx) fadd %st(0),%st(1) fsubl 8(%edx) fxch %st(1) fstpl -72(%edx) fstpl 8(%edx) fldl 16(%edx) fldl -64(%edx) fadd %st(0),%st(1) fsubl 16(%edx) fxch %st(1) fstpl -64(%edx) fstpl 16(%edx) fldl 24(%edx) fldl -56(%edx) fadd %st(0),%st(1) fsubl 24(%edx) fxch %st(1) fstpl -56(%edx) fstpl 24(%edx) fldl 32(%edx) fldl -48(%edx) fadd %st(0),%st(1) fsubl 32(%edx) fxch %st(1) fstpl -48(%edx) fstpl 32(%edx) fldl 64(%edx) fsubl -112(%ecx) fstpl -32(%ecx) fldl 72(%edx) fsubl -104(%ecx) fstpl -24(%ecx) fldl 80(%edx) fsubl -96(%ecx) fstpl -16(%ecx) fldl 88(%edx) fsubl -88(%ecx) fstpl -8(%ecx) fldl 96(%edx) fsubl -80(%ecx) fstpl 0(%ecx) fldl 104(%edx) fsubl -72(%ecx) fstpl 8(%ecx) fldl 112(%edx) fsubl -64(%ecx) fstpl 16(%ecx) fldl 120(%edx) fsubl -56(%ecx) fstpl 24(%ecx) fldl -128(%ecx) fsubl -48(%ecx) fstpl 32(%ecx) fldl -120(%ecx) fsubl -40(%ecx) fstpl 40(%ecx) fldl -48(%edx) fmul %st(0),%st(0) fldl -120(%edx) fadd %st(0),%st(0) fldl -112(%edx) fadd %st(0),%st(0) fldl -104(%edx) fadd %st(0),%st(0) fldl -64(%edx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl -48(%edx) fmul %st(4),%st(0) fldl -56(%edx) fmul %st(4),%st(0) faddp %st(0),%st(1) fxch %st(4) fstl 0(%esp) fxch %st(3) fstl 8(%esp) fxch %st(3) fmull -56(%edx) faddp %st(0),%st(1) fldl -72(%edx) fxch %st(5) fmul %st(0),%st(3) fxch %st(3) faddp %st(0),%st(1) fxch %st(2) fadd %st(0),%st(0) fldl -64(%edx) fmul %st(2),%st(0) faddp %st(0),%st(4) fxch %st(1) fstl 16(%esp) fldl -80(%edx) fxch %st(5) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(3) fadd %st(0),%st(0) fstpl 48(%esp) fldl -96(%edx) fadd %st(0),%st(0) fstl 24(%esp) fldl -72(%edx) fmul %st(1),%st(0) faddp %st(0),%st(4) fmul %st(4),%st(0) faddp %st(0),%st(2) fxch %st(3) fadd %st(0),%st(0) fstpl 40(%esp) fldl -88(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) fldl 8(%esp) fldl -48(%edx) fmul %st(0),%st(1) fldl 16(%esp) fmul %st(0),%st(1) fldl -56(%edx) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(3) fldl 24(%esp) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(2) fldl -88(%edx) fadd %st(0),%st(0) fstl 32(%esp) fmull -80(%edx) faddp %st(0),%st(6) fxch %st(3) faddp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(5),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fsubr %st(0),%st(5) fldl -64(%edx) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(3) fldl 32(%esp) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(2) fldl -72(%edx) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(3) fxch %st(3) fmull 40(%esp) faddp %st(0),%st(1) fxch %st(3) fstpl 48(%edx) fldl -80(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) fxch %st(2) fmull crypto_scalarmult_curve25519_athlon_scale fxch %st(3) fstpl 56(%edx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 24(%esp) fmull -48(%edx) fldl -120(%edx) fmul %st(0),%st(0) faddp %st(0),%st(2) fldl 32(%esp) fmull -56(%edx) faddp %st(0),%st(1) fldl 0(%esp) fmull -112(%edx) faddp %st(0),%st(3) fldl 40(%esp) fmull -64(%edx) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(2),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fsubr %st(0),%st(2) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(3),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fsubr %st(0),%st(3) fldl -72(%edx) fmul %st(0),%st(0) faddp %st(0),%st(2) fxch %st(1) fmull 
crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -104(%edx) faddp %st(0),%st(1) fldl -112(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fsubr %st(0),%st(1) fxch %st(2) fstpl -120(%edx) fldl 32(%esp) fmull -48(%edx) fldl 40(%esp) fmull -56(%edx) faddp %st(0),%st(1) fldl 48(%esp) fmull -64(%edx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -96(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull -104(%edx) faddp %st(0),%st(1) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(2),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fsubr %st(0),%st(2) fxch %st(3) fstpl -112(%edx) fldl 40(%esp) fmull -48(%edx) fldl 48(%esp) fmull -56(%edx) faddp %st(0),%st(1) fldl -64(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -88(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull -96(%edx) faddp %st(0),%st(1) fldl -104(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(3),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fsubr %st(0),%st(3) fxch %st(1) fstpl -104(%edx) fldl 48(%esp) fldl -48(%edx) fmul %st(0),%st(1) fmul %st(5),%st(0) fxch %st(5) fmull -56(%edx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -80(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull -88(%edx) faddp %st(0),%st(1) fldl 16(%esp) fmull -96(%edx) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fsubr %st(0),%st(1) fxch %st(2) fstpl 40(%edx) fldl -56(%edx) fmul %st(0),%st(0) faddp %st(0),%st(4) fxch %st(3) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -72(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull -80(%edx) faddp %st(0),%st(1) fldl 16(%esp) fmull -88(%edx) faddp %st(0),%st(1) fldl -96(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fsubr %st(0),%st(1) fldl -56(%edx) fadd %st(0),%st(0) fmull -48(%edx) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -64(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull -72(%edx) faddp %st(0),%st(1) fldl 16(%esp) fmull -80(%edx) faddp %st(0),%st(1) fldl 24(%esp) fmull -88(%edx) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fsubr %st(0),%st(1) fldl 48(%edx) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fldl 56(%edx) fxch %st(1) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(2) faddp %st(0),%st(1) fxch %st(4) fstpl -96(%edx) fxch %st(4) fstpl -88(%edx) fxch %st(1) fstpl -80(%edx) fstpl -72(%edx) fxch %st(1) fstpl -64(%edx) fstpl -56(%edx) fldl 32(%edx) fmul %st(0),%st(0) fldl -40(%edx) fadd %st(0),%st(0) fldl -32(%edx) fadd %st(0),%st(0) fldl -24(%edx) fadd %st(0),%st(0) fldl 16(%edx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl 32(%edx) fmul %st(4),%st(0) fldl 24(%edx) fmul %st(4),%st(0) faddp %st(0),%st(1) fxch %st(4) fstl 0(%esp) fxch %st(3) fstl 8(%esp) fxch %st(3) fmull 24(%edx) faddp %st(0),%st(1) fldl 8(%edx) fxch %st(5) fmul %st(0),%st(3) 
fxch %st(3) faddp %st(0),%st(1) fxch %st(2) fadd %st(0),%st(0) fldl 16(%edx) fmul %st(2),%st(0) faddp %st(0),%st(4) fxch %st(1) fstl 16(%esp) fldl 0(%edx) fxch %st(5) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(3) fadd %st(0),%st(0) fstpl 48(%esp) fldl -16(%edx) fadd %st(0),%st(0) fstl 24(%esp) fldl 8(%edx) fmul %st(1),%st(0) faddp %st(0),%st(4) fmul %st(4),%st(0) faddp %st(0),%st(2) fxch %st(3) fadd %st(0),%st(0) fstpl 40(%esp) fldl -8(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) fldl 8(%esp) fldl 32(%edx) fmul %st(0),%st(1) fldl 16(%esp) fmul %st(0),%st(1) fldl 24(%edx) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(3) fldl 24(%esp) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(2) fldl -8(%edx) fadd %st(0),%st(0) fstl 32(%esp) fmull 0(%edx) faddp %st(0),%st(6) fxch %st(3) faddp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(5),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fsubr %st(0),%st(5) fldl 16(%edx) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(3) fldl 32(%esp) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(2) fldl 8(%edx) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(3) fxch %st(3) fmull 40(%esp) faddp %st(0),%st(1) fxch %st(3) fstpl -48(%edx) fldl 0(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) fxch %st(2) fmull crypto_scalarmult_curve25519_athlon_scale fxch %st(3) fstpl 48(%edx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 24(%esp) fmull 32(%edx) fldl -40(%edx) fmul %st(0),%st(0) faddp %st(0),%st(2) fldl 32(%esp) fmull 24(%edx) faddp %st(0),%st(1) fldl 0(%esp) fmull -32(%edx) faddp %st(0),%st(3) fldl 40(%esp) fmull 16(%edx) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(2),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fsubr %st(0),%st(2) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(3),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fsubr %st(0),%st(3) fldl 8(%edx) fmul %st(0),%st(0) faddp %st(0),%st(2) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -24(%edx) faddp %st(0),%st(1) fldl -32(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fsubr %st(0),%st(1) fxch %st(2) fstpl 56(%ecx) fldl 32(%esp) fmull 32(%edx) fldl 40(%esp) fmull 24(%edx) faddp %st(0),%st(1) fldl 48(%esp) fmull 16(%edx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -16(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull -24(%edx) faddp %st(0),%st(1) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(2),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fsubr %st(0),%st(2) fxch %st(3) fstpl 64(%ecx) fldl 40(%esp) fmull 32(%edx) fldl 48(%esp) fmull 24(%edx) faddp %st(0),%st(1) fldl 16(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull -8(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull -16(%edx) faddp %st(0),%st(1) fldl -24(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(3),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fsubr %st(0),%st(3) fxch %st(1) fstpl 72(%ecx) fldl 48(%esp) fldl 32(%edx) fmul %st(0),%st(1) fmul %st(5),%st(0) fxch %st(5) fmull 
24(%edx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 0(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull -8(%edx) faddp %st(0),%st(1) fldl 16(%esp) fmull -16(%edx) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fsubr %st(0),%st(1) fxch %st(2) fstpl 80(%ecx) fldl 24(%edx) fmul %st(0),%st(0) faddp %st(0),%st(4) fxch %st(3) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 8(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull 0(%edx) faddp %st(0),%st(1) fldl 16(%esp) fmull -8(%edx) faddp %st(0),%st(1) fldl -16(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fsubr %st(0),%st(1) fldl 24(%edx) fadd %st(0),%st(0) fmull 32(%edx) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 16(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull 8(%edx) faddp %st(0),%st(1) fldl 16(%esp) fmull 0(%edx) faddp %st(0),%st(1) fldl 24(%esp) fmull -8(%edx) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fsubr %st(0),%st(1) fldl -48(%edx) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fldl 48(%edx) fxch %st(1) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(2) faddp %st(0),%st(1) fxch %st(4) fstpl 88(%ecx) fxch %st(4) fstpl 96(%ecx) fxch %st(1) fstpl 104(%ecx) fstpl 112(%ecx) fxch %st(1) fstpl 120(%ecx) fstpl 128(%ecx) fldl 32(%ecx) fmull crypto_scalarmult_curve25519_athlon_121665 fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fldl 40(%ecx) fmull crypto_scalarmult_curve25519_athlon_121665 fadd %st(1),%st(0) fxch %st(1) fsubrp %st(0),%st(2) fxch %st(1) fstpl 0(%esp) fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fsubr %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fxch %st(1) fstpl 8(%esp) fldl -32(%ecx) fmull crypto_scalarmult_curve25519_athlon_121665 faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fldl -24(%ecx) fmull crypto_scalarmult_curve25519_athlon_121665 fadd %st(1),%st(0) fxch %st(1) fsubrp %st(0),%st(2) fxch %st(1) fstpl -48(%edx) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fldl -16(%ecx) fmull crypto_scalarmult_curve25519_athlon_121665 fadd %st(1),%st(0) fxch %st(1) fsubrp %st(0),%st(2) fxch %st(1) fstpl -40(%edx) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fldl -8(%ecx) fmull crypto_scalarmult_curve25519_athlon_121665 fadd %st(1),%st(0) fxch %st(1) fsubrp %st(0),%st(2) fxch %st(1) fstpl -32(%edx) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fldl 0(%ecx) fmull crypto_scalarmult_curve25519_athlon_121665 fadd %st(1),%st(0) fxch %st(1) fsubrp %st(0),%st(2) fxch %st(1) fstpl -24(%edx) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fldl 8(%ecx) fmull crypto_scalarmult_curve25519_athlon_121665 fadd 
%st(1),%st(0) fxch %st(1) fsubrp %st(0),%st(2) fxch %st(1) fstpl -16(%edx) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fldl 16(%ecx) fmull crypto_scalarmult_curve25519_athlon_121665 fadd %st(1),%st(0) fxch %st(1) fsubrp %st(0),%st(2) fxch %st(1) fstpl -8(%edx) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fldl 24(%ecx) fmull crypto_scalarmult_curve25519_athlon_121665 fadd %st(1),%st(0) fxch %st(1) fsubrp %st(0),%st(2) fxch %st(1) fstpl 0(%edx) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fldl 0(%esp) fadd %st(1),%st(0) fxch %st(1) fsubrp %st(0),%st(2) fxch %st(1) fstpl 8(%edx) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fldl 8(%esp) fadd %st(1),%st(0) fxch %st(1) fsubrp %st(0),%st(2) fxch %st(1) fstpl 16(%edx) fstpl 48(%ecx) fldl -120(%ecx) fmull -40(%ecx) fmull crypto_scalarmult_curve25519_athlon_scale fldl 64(%edx) fmull -48(%ecx) faddp %st(0),%st(1) fldl 72(%edx) fmull -56(%ecx) faddp %st(0),%st(1) fldl 64(%edx) fmull -40(%ecx) fldl 80(%edx) fmull -64(%ecx) faddp %st(0),%st(2) fldl 72(%edx) fmull -48(%ecx) faddp %st(0),%st(1) fldl 88(%edx) fmull -72(%ecx) faddp %st(0),%st(2) fldl 80(%edx) fmull -56(%ecx) faddp %st(0),%st(1) fldl 96(%edx) fmull -80(%ecx) faddp %st(0),%st(2) fldl 88(%edx) fmull -64(%ecx) faddp %st(0),%st(1) fldl 72(%edx) fmull -40(%ecx) fldl 104(%edx) fmull -88(%ecx) faddp %st(0),%st(3) fldl 96(%edx) fmull -72(%ecx) faddp %st(0),%st(2) fldl 80(%edx) fmull -48(%ecx) faddp %st(0),%st(1) fldl 112(%edx) fmull -96(%ecx) faddp %st(0),%st(3) fldl 104(%edx) fmull -80(%ecx) faddp %st(0),%st(2) fldl 88(%edx) fmull -56(%ecx) faddp %st(0),%st(1) fldl 120(%edx) fmull -104(%ecx) faddp %st(0),%st(3) fldl 112(%edx) fmull -88(%ecx) faddp %st(0),%st(2) fldl 96(%edx) fmull -64(%ecx) faddp %st(0),%st(1) fldl -128(%ecx) fmull -112(%ecx) faddp %st(0),%st(3) fldl 120(%edx) fmull -96(%ecx) faddp %st(0),%st(2) fldl 104(%edx) fmull -72(%ecx) faddp %st(0),%st(1) fldl 80(%edx) fmull -40(%ecx) fldl 112(%edx) fmull -80(%ecx) faddp %st(0),%st(2) fldl -128(%ecx) fmull -104(%ecx) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(4),%st(0) fldl 88(%edx) fmull -48(%ecx) faddp %st(0),%st(2) fldl 120(%edx) fmull -88(%ecx) faddp %st(0),%st(3) fldl -120(%ecx) fmull -112(%ecx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fldl 96(%edx) fmull -56(%ecx) faddp %st(0),%st(2) fldl 88(%edx) fmull -40(%ecx) fldl -128(%ecx) fmull -96(%ecx) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fldl 104(%edx) fmull -64(%ecx) faddp %st(0),%st(3) fldl 96(%edx) fmull -48(%ecx) faddp %st(0),%st(2) fsubrp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(4),%st(0) fldl 112(%edx) fmull -72(%ecx) faddp %st(0),%st(3) fldl 104(%edx) fmull -56(%ecx) faddp %st(0),%st(2) fldl -120(%ecx) fmull -104(%ecx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fldl 120(%edx) fmull -80(%ecx) faddp %st(0),%st(3) fldl 96(%edx) fmull -40(%ecx) fldl 112(%edx) fmull -64(%ecx) faddp %st(0),%st(3) fldl -128(%ecx) fmull -88(%ecx) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fsubrp %st(0),%st(5) fxch %st(5) fstpl 0(%esp) fldl 104(%edx) fmull -48(%ecx) faddp %st(0),%st(5) fldl 120(%edx) fmull -72(%ecx) faddp %st(0),%st(1) fldl -120(%ecx) fmull -96(%ecx) 
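# (hedged) Another interleaved limb product follows; the fmull ..._121665 operations just
# above appear to multiply by the curve constant 121665 = (486662 - 2)/4, the a24 term of
# the Montgomery doubling formula.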
faddp %st(0),%st(2) fxch %st(2) fmull crypto_scalarmult_curve25519_athlon_scale fldl 112(%edx) fmull -56(%ecx) faddp %st(0),%st(5) fldl -128(%ecx) fmull -80(%ecx) faddp %st(0),%st(3) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 64(%edx) fmull -112(%ecx) faddp %st(0),%st(2) fxch %st(3) fstpl 8(%esp) fldl 104(%edx) fmull -40(%ecx) fldl 120(%edx) fmull -64(%ecx) faddp %st(0),%st(5) fldl -120(%ecx) fmull -88(%ecx) faddp %st(0),%st(3) fldl 64(%edx) fmull -104(%ecx) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(2),%st(0) fldl 112(%edx) fmull -48(%ecx) faddp %st(0),%st(2) fldl -128(%ecx) fmull -72(%ecx) faddp %st(0),%st(6) fxch %st(3) fmull crypto_scalarmult_curve25519_athlon_scale fldl 72(%edx) fmull -112(%ecx) faddp %st(0),%st(5) fxch %st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fldl 120(%edx) fmull -56(%ecx) faddp %st(0),%st(2) fldl -120(%ecx) fmull -80(%ecx) faddp %st(0),%st(6) fldl 64(%edx) fmull -96(%ecx) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(2) fldl 112(%edx) fmull -40(%ecx) fldl -128(%ecx) fmull -64(%ecx) faddp %st(0),%st(2) fxch %st(5) fmull crypto_scalarmult_curve25519_athlon_scale fldl 72(%edx) fmull -104(%ecx) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(5),%st(0) fldl 120(%edx) fmull -48(%ecx) faddp %st(0),%st(7) fldl -120(%ecx) fmull -72(%ecx) faddp %st(0),%st(3) fldl 64(%edx) fmull -88(%ecx) faddp %st(0),%st(2) fldl 80(%edx) fmull -112(%ecx) faddp %st(0),%st(5) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fxch %st(3) fstpl 16(%esp) fldl -128(%ecx) fmull -56(%ecx) faddp %st(0),%st(6) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 72(%edx) fmull -96(%ecx) faddp %st(0),%st(2) fxch %st(2) fadd %st(0),%st(3) fsubrp %st(0),%st(4) fldl 120(%edx) fmull -40(%ecx) fldl -120(%ecx) fmull -64(%ecx) faddp %st(0),%st(6) fldl 64(%edx) fmull -80(%ecx) faddp %st(0),%st(3) fldl 80(%edx) fmull -104(%ecx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(4),%st(0) fldl -128(%ecx) fmull -48(%ecx) faddp %st(0),%st(2) fxch %st(6) fmull crypto_scalarmult_curve25519_athlon_scale fldl 72(%edx) fmull -88(%ecx) faddp %st(0),%st(4) fldl 88(%edx) fmull -112(%ecx) faddp %st(0),%st(3) fxch %st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fxch %st(5) fstpl 24(%esp) fldl -120(%ecx) fmull -56(%ecx) faddp %st(0),%st(1) fldl 64(%edx) fmull -72(%ecx) faddp %st(0),%st(6) fldl 80(%edx) fmull -96(%ecx) faddp %st(0),%st(3) fxch %st(4) fadd %st(0),%st(1) fsubrp %st(0),%st(3) fldl -128(%ecx) fmull -40(%ecx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl 72(%edx) fmull -80(%ecx) faddp %st(0),%st(6) fldl 88(%edx) fmull -104(%ecx) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(2),%st(0) fldl -120(%ecx) fmull -48(%ecx) faddp %st(0),%st(6) fldl 64(%edx) fmull -64(%ecx) faddp %st(0),%st(2) fldl 80(%edx) fmull -88(%ecx) faddp %st(0),%st(7) fldl 96(%edx) fmull -112(%ecx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fxch %st(4) fstpl 32(%esp) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl 72(%edx) fmull -72(%ecx) faddp %st(0),%st(5) fldl 88(%edx) fmull -96(%ecx) faddp %st(0),%st(6) fxch %st(3) fadd %st(0),%st(2) fsubrp %st(0),%st(1) fldl 64(%edx) fmull -56(%ecx) faddp %st(0),%st(3) fldl 80(%edx) fmull -80(%ecx) faddp %st(0),%st(4) fldl 96(%edx) fmull -104(%ecx) faddp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(2),%st(0) fldl 72(%edx) fmull 
-64(%ecx) faddp %st(0),%st(4) fldl 88(%edx) fmull -88(%ecx) faddp %st(0),%st(5) fldl 104(%edx) fmull -112(%ecx) faddp %st(0),%st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fxch %st(1) fstpl 40(%esp) fldl 80(%edx) fmull -72(%ecx) faddp %st(0),%st(3) fldl 96(%edx) fmull -96(%ecx) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(1) fstpl 48(%esp) fldl 88(%edx) fmull -80(%ecx) faddp %st(0),%st(1) fldl 104(%edx) fmull -104(%ecx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(3),%st(0) fldl 96(%edx) fmull -88(%ecx) faddp %st(0),%st(2) fldl 112(%edx) fmull -112(%ecx) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fldl 104(%edx) fmull -96(%ecx) faddp %st(0),%st(2) fadd %st(0),%st(2) fsubrp %st(0),%st(3) fxch %st(2) fstpl 24(%edx) fldl 112(%edx) fmull -104(%ecx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fldl 120(%edx) fmull -112(%ecx) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fldl 0(%esp) fldl 8(%esp) fxch %st(2) fadd %st(0),%st(4) fsubrp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(4),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(0),%st(1) fsubrp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) faddp %st(0),%st(2) fxch %st(2) fstpl 32(%edx) fxch %st(2) fstpl 48(%edx) fstpl 56(%edx) fstpl -112(%ecx) fldl -48(%edx) faddl 64(%edx) fstpl -104(%ecx) fldl -40(%edx) faddl 72(%edx) fstpl -96(%ecx) fldl -32(%edx) faddl 80(%edx) fstpl -88(%ecx) fldl -24(%edx) faddl 88(%edx) fstpl -80(%ecx) fldl -16(%edx) faddl 96(%edx) fstpl -16(%edx) fldl -8(%edx) faddl 104(%edx) fstpl -8(%edx) fldl 0(%edx) faddl 112(%edx) fstpl 0(%edx) fldl 8(%edx) faddl 120(%edx) fstpl 8(%edx) fldl 16(%edx) faddl -128(%ecx) fstpl 16(%edx) fldl 48(%ecx) faddl -120(%ecx) fstpl 80(%edx) fldl 128(%ecx) fmull -128(%edx) fmull crypto_scalarmult_curve25519_athlon_scale fldl 56(%ecx) fmull 120(%esp) faddp %st(0),%st(1) fldl 64(%ecx) fmull 112(%esp) faddp %st(0),%st(1) fldl 56(%ecx) fmull -128(%edx) fldl 72(%ecx) fmull 104(%esp) faddp %st(0),%st(2) fldl 64(%ecx) fmull 120(%esp) faddp %st(0),%st(1) fldl 80(%ecx) fmull 96(%esp) faddp %st(0),%st(2) fldl 72(%ecx) fmull 112(%esp) faddp %st(0),%st(1) fldl 88(%ecx) fmull 88(%esp) faddp %st(0),%st(2) fldl 80(%ecx) fmull 104(%esp) faddp %st(0),%st(1) fldl 64(%ecx) fmull -128(%edx) fldl 96(%ecx) fmull 80(%esp) faddp %st(0),%st(3) fldl 88(%ecx) fmull 96(%esp) faddp %st(0),%st(2) fldl 72(%ecx) fmull 120(%esp) faddp %st(0),%st(1) fldl 104(%ecx) fmull 72(%esp) faddp %st(0),%st(3) fldl 96(%ecx) fmull 88(%esp) faddp %st(0),%st(2) fldl 80(%ecx) fmull 112(%esp) faddp %st(0),%st(1) fldl 112(%ecx) fmull 64(%esp) faddp %st(0),%st(3) fldl 104(%ecx) fmull 80(%esp) faddp %st(0),%st(2) fldl 88(%ecx) fmull 104(%esp) faddp %st(0),%st(1) fldl 120(%ecx) fmull 56(%esp) faddp %st(0),%st(3) fldl 112(%ecx) fmull 72(%esp) faddp %st(0),%st(2) fldl 96(%ecx) fmull 96(%esp) faddp %st(0),%st(1) fldl 72(%ecx) fmull -128(%edx) fldl 104(%ecx) fmull 88(%esp) faddp %st(0),%st(2) fldl 120(%ecx) fmull 64(%esp) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(4),%st(0) fldl 80(%ecx) fmull 120(%esp) faddp %st(0),%st(2) fldl 112(%ecx) fmull 80(%esp) faddp %st(0),%st(3) fldl 128(%ecx) fmull 56(%esp) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fldl 88(%ecx) fmull 112(%esp) faddp 
%st(0),%st(2) fldl 80(%ecx) fmull -128(%edx) fldl 120(%ecx) fmull 72(%esp) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fldl 96(%ecx) fmull 104(%esp) faddp %st(0),%st(3) fldl 88(%ecx) fmull 120(%esp) faddp %st(0),%st(2) fsubrp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(4),%st(0) fldl 104(%ecx) fmull 96(%esp) faddp %st(0),%st(3) fldl 96(%ecx) fmull 112(%esp) faddp %st(0),%st(2) fldl 128(%ecx) fmull 64(%esp) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fldl 112(%ecx) fmull 88(%esp) faddp %st(0),%st(3) fldl 88(%ecx) fmull -128(%edx) fldl 104(%ecx) fmull 104(%esp) faddp %st(0),%st(3) fldl 120(%ecx) fmull 80(%esp) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fsubrp %st(0),%st(5) fxch %st(5) fstpl 0(%esp) fldl 96(%ecx) fmull 120(%esp) faddp %st(0),%st(5) fldl 112(%ecx) fmull 96(%esp) faddp %st(0),%st(1) fldl 128(%ecx) fmull 72(%esp) faddp %st(0),%st(2) fxch %st(2) fmull crypto_scalarmult_curve25519_athlon_scale fldl 104(%ecx) fmull 112(%esp) faddp %st(0),%st(5) fldl 120(%ecx) fmull 88(%esp) faddp %st(0),%st(3) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 56(%ecx) fmull 56(%esp) faddp %st(0),%st(2) fxch %st(3) fstpl 8(%esp) fldl 96(%ecx) fmull -128(%edx) fldl 112(%ecx) fmull 104(%esp) faddp %st(0),%st(5) fldl 128(%ecx) fmull 80(%esp) faddp %st(0),%st(3) fldl 56(%ecx) fmull 64(%esp) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(2),%st(0) fldl 104(%ecx) fmull 120(%esp) faddp %st(0),%st(2) fldl 120(%ecx) fmull 96(%esp) faddp %st(0),%st(6) fxch %st(3) fmull crypto_scalarmult_curve25519_athlon_scale fldl 64(%ecx) fmull 56(%esp) faddp %st(0),%st(5) fxch %st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fldl 112(%ecx) fmull 112(%esp) faddp %st(0),%st(2) fldl 128(%ecx) fmull 88(%esp) faddp %st(0),%st(6) fldl 56(%ecx) fmull 72(%esp) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(2) fldl 104(%ecx) fmull -128(%edx) fldl 120(%ecx) fmull 104(%esp) faddp %st(0),%st(2) fxch %st(5) fmull crypto_scalarmult_curve25519_athlon_scale fldl 64(%ecx) fmull 64(%esp) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(5),%st(0) fldl 112(%ecx) fmull 120(%esp) faddp %st(0),%st(7) fldl 128(%ecx) fmull 96(%esp) faddp %st(0),%st(3) fldl 56(%ecx) fmull 80(%esp) faddp %st(0),%st(2) fldl 72(%ecx) fmull 56(%esp) faddp %st(0),%st(5) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fxch %st(3) fstpl -48(%edx) fldl 120(%ecx) fmull 112(%esp) faddp %st(0),%st(6) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 64(%ecx) fmull 72(%esp) faddp %st(0),%st(2) fxch %st(2) fadd %st(0),%st(3) fsubrp %st(0),%st(4) fldl 112(%ecx) fmull -128(%edx) fldl 128(%ecx) fmull 104(%esp) faddp %st(0),%st(6) fldl 56(%ecx) fmull 88(%esp) faddp %st(0),%st(3) fldl 72(%ecx) fmull 64(%esp) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(4),%st(0) fldl 120(%ecx) fmull 120(%esp) faddp %st(0),%st(2) fxch %st(6) fmull crypto_scalarmult_curve25519_athlon_scale fldl 64(%ecx) fmull 80(%esp) faddp %st(0),%st(4) fldl 80(%ecx) fmull 56(%esp) faddp %st(0),%st(3) fxch %st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fxch %st(5) fstpl -40(%edx) fldl 128(%ecx) fmull 112(%esp) faddp %st(0),%st(1) fldl 56(%ecx) fmull 96(%esp) faddp %st(0),%st(6) fldl 72(%ecx) fmull 72(%esp) faddp %st(0),%st(3) fxch %st(4) fadd %st(0),%st(1) fsubrp %st(0),%st(3) fldl 120(%ecx) fmull -128(%edx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl 64(%ecx) fmull 88(%esp) 
faddp %st(0),%st(6) fldl 80(%ecx) fmull 64(%esp) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(2),%st(0) fldl 128(%ecx) fmull 120(%esp) faddp %st(0),%st(6) fldl 56(%ecx) fmull 104(%esp) faddp %st(0),%st(2) fldl 72(%ecx) fmull 80(%esp) faddp %st(0),%st(7) fldl 88(%ecx) fmull 56(%esp) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fxch %st(4) fstpl -32(%edx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl 64(%ecx) fmull 96(%esp) faddp %st(0),%st(5) fldl 80(%ecx) fmull 72(%esp) faddp %st(0),%st(6) fxch %st(3) fadd %st(0),%st(2) fsubrp %st(0),%st(1) fldl 56(%ecx) fmull 112(%esp) faddp %st(0),%st(3) fldl 72(%ecx) fmull 88(%esp) faddp %st(0),%st(4) fldl 88(%ecx) fmull 64(%esp) faddp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(2),%st(0) fldl 64(%ecx) fmull 104(%esp) faddp %st(0),%st(4) fldl 80(%ecx) fmull 80(%esp) faddp %st(0),%st(5) fldl 96(%ecx) fmull 56(%esp) faddp %st(0),%st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fxch %st(1) fstpl -24(%edx) fldl 72(%ecx) fmull 96(%esp) faddp %st(0),%st(3) fldl 88(%ecx) fmull 72(%esp) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(1) fstpl 96(%edx) fldl 80(%ecx) fmull 88(%esp) faddp %st(0),%st(1) fldl 96(%ecx) fmull 64(%esp) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(3),%st(0) fldl 88(%ecx) fmull 80(%esp) faddp %st(0),%st(2) fldl 104(%ecx) fmull 56(%esp) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fldl 96(%ecx) fmull 72(%esp) faddp %st(0),%st(2) fadd %st(0),%st(2) fsubrp %st(0),%st(3) fxch %st(2) fstpl 104(%edx) fldl 104(%ecx) fmull 64(%esp) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fldl 112(%ecx) fmull 56(%esp) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fldl 0(%esp) fldl 8(%esp) fxch %st(2) fadd %st(0),%st(4) fsubrp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(4),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(0),%st(1) fsubrp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) faddp %st(0),%st(2) fxch %st(2) fstpl 112(%edx) fxch %st(2) fstpl 120(%edx) fstpl -128(%ecx) fstpl -120(%ecx) fldl 80(%edx) fmull 40(%ecx) fmull crypto_scalarmult_curve25519_athlon_scale fldl -104(%ecx) fmull 32(%ecx) faddp %st(0),%st(1) fldl -96(%ecx) fmull 24(%ecx) faddp %st(0),%st(1) fldl -104(%ecx) fmull 40(%ecx) fldl -88(%ecx) fmull 16(%ecx) faddp %st(0),%st(2) fldl -96(%ecx) fmull 32(%ecx) faddp %st(0),%st(1) fldl -80(%ecx) fmull 8(%ecx) faddp %st(0),%st(2) fldl -88(%ecx) fmull 24(%ecx) faddp %st(0),%st(1) fldl -16(%edx) fmull 0(%ecx) faddp %st(0),%st(2) fldl -80(%ecx) fmull 16(%ecx) faddp %st(0),%st(1) fldl -96(%ecx) fmull 40(%ecx) fldl -8(%edx) fmull -8(%ecx) faddp %st(0),%st(3) fldl -16(%edx) fmull 8(%ecx) faddp %st(0),%st(2) fldl -88(%ecx) fmull 32(%ecx) faddp %st(0),%st(1) fldl 0(%edx) fmull -16(%ecx) faddp %st(0),%st(3) fldl -8(%edx) fmull 0(%ecx) faddp %st(0),%st(2) fldl -80(%ecx) fmull 24(%ecx) faddp %st(0),%st(1) fldl 8(%edx) fmull -24(%ecx) faddp %st(0),%st(3) fldl 0(%edx) fmull -8(%ecx) faddp %st(0),%st(2) fldl -16(%edx) fmull 16(%ecx) faddp %st(0),%st(1) fldl 16(%edx) fmull -32(%ecx) faddp %st(0),%st(3) fldl 8(%edx) fmull -16(%ecx) faddp %st(0),%st(2) fldl -8(%edx) fmull 8(%ecx) faddp %st(0),%st(1) fldl -88(%ecx) fmull 40(%ecx) fldl 0(%edx) fmull 0(%ecx) faddp 
%st(0),%st(2) fldl 16(%edx) fmull -24(%ecx) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(4),%st(0) fldl -80(%ecx) fmull 32(%ecx) faddp %st(0),%st(2) fldl 8(%edx) fmull -8(%ecx) faddp %st(0),%st(3) fldl 80(%edx) fmull -32(%ecx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fldl -16(%edx) fmull 24(%ecx) faddp %st(0),%st(2) fldl -80(%ecx) fmull 40(%ecx) fldl 16(%edx) fmull -16(%ecx) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fldl -8(%edx) fmull 16(%ecx) faddp %st(0),%st(3) fldl -16(%edx) fmull 32(%ecx) faddp %st(0),%st(2) fsubrp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(4),%st(0) fldl 0(%edx) fmull 8(%ecx) faddp %st(0),%st(3) fldl -8(%edx) fmull 24(%ecx) faddp %st(0),%st(2) fldl 80(%edx) fmull -24(%ecx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fldl 8(%edx) fmull 0(%ecx) faddp %st(0),%st(3) fldl -16(%edx) fmull 40(%ecx) fldl 0(%edx) fmull 16(%ecx) faddp %st(0),%st(3) fldl 16(%edx) fmull -8(%ecx) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fsubrp %st(0),%st(5) fxch %st(5) fstpl 0(%esp) fldl -8(%edx) fmull 32(%ecx) faddp %st(0),%st(5) fldl 8(%edx) fmull 8(%ecx) faddp %st(0),%st(1) fldl 80(%edx) fmull -16(%ecx) faddp %st(0),%st(2) fxch %st(2) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%edx) fmull 24(%ecx) faddp %st(0),%st(5) fldl 16(%edx) fmull 0(%ecx) faddp %st(0),%st(3) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl -104(%ecx) fmull -32(%ecx) faddp %st(0),%st(2) fxch %st(3) fstpl 8(%esp) fldl -8(%edx) fmull 40(%ecx) fldl 8(%edx) fmull 16(%ecx) faddp %st(0),%st(5) fldl 80(%edx) fmull -8(%ecx) faddp %st(0),%st(3) fldl -104(%ecx) fmull -24(%ecx) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(2),%st(0) fldl 0(%edx) fmull 32(%ecx) faddp %st(0),%st(2) fldl 16(%edx) fmull 8(%ecx) faddp %st(0),%st(6) fxch %st(3) fmull crypto_scalarmult_curve25519_athlon_scale fldl -96(%ecx) fmull -32(%ecx) faddp %st(0),%st(5) fxch %st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fldl 8(%edx) fmull 24(%ecx) faddp %st(0),%st(2) fldl 80(%edx) fmull 0(%ecx) faddp %st(0),%st(6) fldl -104(%ecx) fmull -16(%ecx) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(2) fldl 0(%edx) fmull 40(%ecx) fldl 16(%edx) fmull 16(%ecx) faddp %st(0),%st(2) fxch %st(5) fmull crypto_scalarmult_curve25519_athlon_scale fldl -96(%ecx) fmull -24(%ecx) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(5),%st(0) fldl 8(%edx) fmull 32(%ecx) faddp %st(0),%st(7) fldl 80(%edx) fmull 8(%ecx) faddp %st(0),%st(3) fldl -104(%ecx) fmull -8(%ecx) faddp %st(0),%st(2) fldl -88(%ecx) fmull -32(%ecx) faddp %st(0),%st(5) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fxch %st(3) fstpl 64(%edx) fldl 16(%edx) fmull 24(%ecx) faddp %st(0),%st(6) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl -96(%ecx) fmull -16(%ecx) faddp %st(0),%st(2) fxch %st(2) fadd %st(0),%st(3) fsubrp %st(0),%st(4) fldl 8(%edx) fmull 40(%ecx) fldl 80(%edx) fmull 16(%ecx) faddp %st(0),%st(6) fldl -104(%ecx) fmull 0(%ecx) faddp %st(0),%st(3) fldl -88(%ecx) fmull -24(%ecx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(4),%st(0) fldl 16(%edx) fmull 32(%ecx) faddp %st(0),%st(2) fxch %st(6) fmull crypto_scalarmult_curve25519_athlon_scale fldl -96(%ecx) fmull -8(%ecx) faddp %st(0),%st(4) fldl -80(%ecx) fmull -32(%ecx) faddp %st(0),%st(3) fxch %st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fxch %st(5) fstpl 
72(%edx) fldl 80(%edx) fmull 24(%ecx) faddp %st(0),%st(1) fldl -104(%ecx) fmull 8(%ecx) faddp %st(0),%st(6) fldl -88(%ecx) fmull -16(%ecx) faddp %st(0),%st(3) fxch %st(4) fadd %st(0),%st(1) fsubrp %st(0),%st(3) fldl 16(%edx) fmull 40(%ecx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl -96(%ecx) fmull 0(%ecx) faddp %st(0),%st(6) fldl -80(%ecx) fmull -24(%ecx) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(2),%st(0) fldl 80(%edx) fmull 32(%ecx) faddp %st(0),%st(6) fldl -104(%ecx) fmull 16(%ecx) faddp %st(0),%st(2) fldl -88(%ecx) fmull -8(%ecx) faddp %st(0),%st(7) fldl -16(%edx) fmull -32(%ecx) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fxch %st(4) fstpl 80(%edx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl -96(%ecx) fmull 8(%ecx) faddp %st(0),%st(5) fldl -80(%ecx) fmull -16(%ecx) faddp %st(0),%st(6) fxch %st(3) fadd %st(0),%st(2) fsubrp %st(0),%st(1) fldl -104(%ecx) fmull 24(%ecx) faddp %st(0),%st(3) fldl -88(%ecx) fmull 0(%ecx) faddp %st(0),%st(4) fldl -16(%edx) fmull -24(%ecx) faddp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(2),%st(0) fldl -96(%ecx) fmull 16(%ecx) faddp %st(0),%st(4) fldl -80(%ecx) fmull -8(%ecx) faddp %st(0),%st(5) fldl -8(%edx) fmull -32(%ecx) faddp %st(0),%st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fxch %st(1) fstpl 88(%edx) fldl -88(%ecx) fmull 8(%ecx) faddp %st(0),%st(3) fldl -16(%edx) fmull -16(%ecx) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(1) fstpl -104(%ecx) fldl -80(%ecx) fmull 0(%ecx) faddp %st(0),%st(1) fldl -8(%edx) fmull -24(%ecx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(3),%st(0) fldl -16(%edx) fmull -8(%ecx) faddp %st(0),%st(2) fldl 0(%edx) fmull -32(%ecx) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fldl -8(%edx) fmull -16(%ecx) faddp %st(0),%st(2) fadd %st(0),%st(2) fsubrp %st(0),%st(3) fxch %st(2) fstpl -96(%ecx) fldl 0(%edx) fmull -24(%ecx) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fldl 8(%edx) fmull -32(%ecx) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fldl 0(%esp) fldl 8(%esp) fxch %st(2) fadd %st(0),%st(4) fsubrp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(4),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(0),%st(1) fsubrp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) faddp %st(0),%st(2) fxch %st(2) fstpl -88(%ecx) fxch %st(2) fstpl -80(%ecx) fstpl -72(%ecx) fstpl -64(%ecx) fldl 136(%ecx) fldl -120(%edx) fldl 16(%esp) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 16(%esp) fxch %st(1) fstpl -16(%edx) fstpl 0(%esp) fldl -112(%edx) fldl 24(%esp) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 24(%esp) fxch %st(1) fstpl -8(%edx) fstpl 8(%esp) fldl -104(%edx) fldl 32(%esp) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 32(%esp) fxch %st(1) fstpl 0(%edx) fstpl 16(%esp) fldl 40(%edx) fldl 40(%esp) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 40(%esp) fxch %st(1) fstpl 8(%edx) fstpl 24(%esp) fldl -96(%edx) fldl 48(%esp) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 48(%esp) fxch %st(1) fstpl 16(%edx) fstpl 32(%esp) fldl -88(%edx) fldl 24(%edx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 24(%edx) fxch 
%st(1) fstpl 24(%edx) fstpl 40(%esp) fldl -80(%edx) fldl 32(%edx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 32(%edx) fxch %st(1) fstpl 32(%edx) fstpl 48(%esp) fldl -72(%edx) fldl 48(%edx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 48(%edx) fxch %st(1) fstpl 40(%edx) fstpl -120(%edx) fldl -64(%edx) fldl 56(%edx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 56(%edx) fxch %st(1) fstpl 48(%edx) fstpl -112(%edx) fldl -56(%edx) fldl -112(%ecx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl -112(%ecx) fxch %st(1) fstpl 56(%edx) fstpl -104(%edx) fldl -48(%edx) fldl 64(%edx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 64(%edx) fxch %st(1) fstpl 64(%edx) fstpl -96(%edx) fldl -40(%edx) fldl 72(%edx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 72(%edx) fxch %st(1) fstpl 72(%edx) fstpl -88(%edx) fldl -32(%edx) fldl 80(%edx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 80(%edx) fxch %st(1) fstpl 80(%edx) fstpl -80(%edx) fldl -24(%edx) fldl 88(%edx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl 88(%edx) fxch %st(1) fstpl 88(%edx) fstpl -72(%edx) fldl 96(%edx) fldl -104(%ecx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl -104(%ecx) fxch %st(1) fstpl 96(%edx) fstpl -64(%edx) fldl 104(%edx) fldl -96(%ecx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl -96(%ecx) fxch %st(1) fstpl 104(%edx) fstpl -56(%edx) fldl 112(%edx) fldl -88(%ecx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl -88(%ecx) fxch %st(1) fstpl 112(%edx) fstpl -48(%edx) fldl 120(%edx) fldl -80(%ecx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl -80(%ecx) fxch %st(1) fstpl 120(%edx) fstpl -40(%edx) fldl -128(%ecx) fldl -72(%ecx) fsubr %st(1),%st(0) fmul %st(2),%st(0) fsubr %st(0),%st(1) faddl -72(%ecx) fxch %st(1) fstpl -128(%ecx) fstpl -32(%edx) fldl -120(%ecx) fldl -64(%ecx) fsubr %st(1),%st(0) fmulp %st(0),%st(2) fsub %st(1),%st(0) fxch %st(1) faddl -64(%ecx) fxch %st(1) fstpl -120(%ecx) fstpl -24(%edx) movl 180(%ecx),%esi movl 184(%ecx),%ebp sub $1,%ebp ja ._morebits movl 188(%ecx),%edi sub $4,%edi jb ._done movl (%ebx,%edi),%esi mov $32,%ebp jmp ._morebytes ._done: movl 4(%esp,%eax),%eax fldl 0(%esp) fstpl 0(%eax) fldl 8(%esp) fstpl 8(%eax) fldl 16(%esp) fstpl 16(%eax) fldl 24(%esp) fstpl 24(%eax) fldl 32(%esp) fstpl 32(%eax) fldl 40(%esp) fstpl 40(%eax) fldl 48(%esp) fstpl 48(%eax) fldl -120(%edx) fstpl 56(%eax) fldl -112(%edx) fstpl 64(%eax) fldl -104(%edx) fstpl 72(%eax) fldl -96(%edx) fstpl 80(%eax) fldl -88(%edx) fstpl 88(%eax) fldl -80(%edx) fstpl 96(%eax) fldl -72(%edx) fstpl 104(%eax) fldl -64(%edx) fstpl 112(%eax) fldl -56(%edx) fstpl 120(%eax) fldl -48(%edx) fstpl 128(%eax) fldl -40(%edx) fstpl 136(%eax) fldl -32(%edx) fstpl 144(%eax) fldl -24(%edx) fstpl 152(%eax) movl 160(%ecx),%eax movl 164(%ecx),%ebx movl 168(%ecx),%esi movl 172(%ecx),%edi movl 176(%ecx),%ebp add %eax,%esp ret curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/athlon/mult.s000066400000000000000000000170611150631715100260050ustar00rootroot00000000000000.text .p2align 5 .globl _crypto_scalarmult_curve25519_athlon_mult .globl crypto_scalarmult_curve25519_athlon_mult _crypto_scalarmult_curve25519_athlon_mult: crypto_scalarmult_curve25519_athlon_mult: mov %esp,%eax and $31,%eax add $32,%eax sub %eax,%esp movl %ebp,0(%esp) movl 4(%esp,%eax),%ecx movl 8(%esp,%eax),%edx movl 12(%esp,%eax),%ebp fldl 72(%edx) fmull 72(%ebp) fmull 
crypto_scalarmult_curve25519_athlon_scale fldl 0(%edx) fmull 64(%ebp) faddp %st(0),%st(1) fldl 8(%edx) fmull 56(%ebp) faddp %st(0),%st(1) fldl 0(%edx) fmull 72(%ebp) fldl 16(%edx) fmull 48(%ebp) faddp %st(0),%st(2) fldl 8(%edx) fmull 64(%ebp) faddp %st(0),%st(1) fldl 24(%edx) fmull 40(%ebp) faddp %st(0),%st(2) fldl 16(%edx) fmull 56(%ebp) faddp %st(0),%st(1) fldl 32(%edx) fmull 32(%ebp) faddp %st(0),%st(2) fldl 24(%edx) fmull 48(%ebp) faddp %st(0),%st(1) fldl 8(%edx) fmull 72(%ebp) fldl 40(%edx) fmull 24(%ebp) faddp %st(0),%st(3) fldl 32(%edx) fmull 40(%ebp) faddp %st(0),%st(2) fldl 16(%edx) fmull 64(%ebp) faddp %st(0),%st(1) fldl 48(%edx) fmull 16(%ebp) faddp %st(0),%st(3) fldl 40(%edx) fmull 32(%ebp) faddp %st(0),%st(2) fldl 24(%edx) fmull 56(%ebp) faddp %st(0),%st(1) fldl 56(%edx) fmull 8(%ebp) faddp %st(0),%st(3) fldl 48(%edx) fmull 24(%ebp) faddp %st(0),%st(2) fldl 32(%edx) fmull 48(%ebp) faddp %st(0),%st(1) fldl 64(%edx) fmull 0(%ebp) faddp %st(0),%st(3) fldl 56(%edx) fmull 16(%ebp) faddp %st(0),%st(2) fldl 40(%edx) fmull 40(%ebp) faddp %st(0),%st(1) fldl 16(%edx) fmull 72(%ebp) fldl 48(%edx) fmull 32(%ebp) faddp %st(0),%st(2) fldl 64(%edx) fmull 8(%ebp) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(4),%st(0) fldl 24(%edx) fmull 64(%ebp) faddp %st(0),%st(2) fldl 56(%edx) fmull 24(%ebp) faddp %st(0),%st(3) fldl 72(%edx) fmull 0(%ebp) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fldl 32(%edx) fmull 56(%ebp) faddp %st(0),%st(2) fldl 24(%edx) fmull 72(%ebp) fldl 64(%edx) fmull 16(%ebp) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fldl 40(%edx) fmull 48(%ebp) faddp %st(0),%st(3) fldl 32(%edx) fmull 64(%ebp) faddp %st(0),%st(2) fsubrp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(4),%st(0) fldl 48(%edx) fmull 40(%ebp) faddp %st(0),%st(3) fldl 40(%edx) fmull 56(%ebp) faddp %st(0),%st(2) fldl 72(%edx) fmull 8(%ebp) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fldl 56(%edx) fmull 32(%ebp) faddp %st(0),%st(3) fldl 32(%edx) fmull 72(%ebp) fldl 48(%edx) fmull 48(%ebp) faddp %st(0),%st(3) fldl 64(%edx) fmull 24(%ebp) faddp %st(0),%st(4) fxch %st(1) fadd %st(0),%st(4) fsubrp %st(0),%st(5) fxch %st(5) fstpl 64(%ecx) fldl 40(%edx) fmull 64(%ebp) faddp %st(0),%st(5) fldl 56(%edx) fmull 40(%ebp) faddp %st(0),%st(1) fldl 72(%edx) fmull 16(%ebp) faddp %st(0),%st(2) fxch %st(2) fmull crypto_scalarmult_curve25519_athlon_scale fldl 48(%edx) fmull 56(%ebp) faddp %st(0),%st(5) fldl 64(%edx) fmull 32(%ebp) faddp %st(0),%st(3) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%edx) fmull 0(%ebp) faddp %st(0),%st(2) fxch %st(3) fstpl 72(%ecx) fldl 40(%edx) fmull 72(%ebp) fldl 56(%edx) fmull 48(%ebp) faddp %st(0),%st(5) fldl 72(%edx) fmull 24(%ebp) faddp %st(0),%st(3) fldl 0(%edx) fmull 8(%ebp) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(2),%st(0) fldl 48(%edx) fmull 64(%ebp) faddp %st(0),%st(2) fldl 64(%edx) fmull 40(%ebp) faddp %st(0),%st(6) fxch %st(3) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%edx) fmull 0(%ebp) faddp %st(0),%st(5) fxch %st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fldl 56(%edx) fmull 56(%ebp) faddp %st(0),%st(2) fldl 72(%edx) fmull 32(%ebp) faddp %st(0),%st(6) fldl 0(%edx) fmull 16(%ebp) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(2) fldl 48(%edx) fmull 72(%ebp) fldl 64(%edx) fmull 48(%ebp) faddp %st(0),%st(2) fxch %st(5) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%edx) fmull 
8(%ebp) faddp %st(0),%st(4) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(5),%st(0) fldl 56(%edx) fmull 64(%ebp) faddp %st(0),%st(7) fldl 72(%edx) fmull 40(%ebp) faddp %st(0),%st(3) fldl 0(%edx) fmull 24(%ebp) faddp %st(0),%st(2) fldl 16(%edx) fmull 0(%ebp) faddp %st(0),%st(5) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fxch %st(3) fstpl 0(%ecx) fldl 64(%edx) fmull 56(%ebp) faddp %st(0),%st(6) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%edx) fmull 16(%ebp) faddp %st(0),%st(2) fxch %st(2) fadd %st(0),%st(3) fsubrp %st(0),%st(4) fldl 56(%edx) fmull 72(%ebp) fldl 72(%edx) fmull 48(%ebp) faddp %st(0),%st(6) fldl 0(%edx) fmull 32(%ebp) faddp %st(0),%st(3) fldl 16(%edx) fmull 8(%ebp) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(4),%st(0) fldl 64(%edx) fmull 64(%ebp) faddp %st(0),%st(2) fxch %st(6) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%edx) fmull 24(%ebp) faddp %st(0),%st(4) fldl 24(%edx) fmull 0(%ebp) faddp %st(0),%st(3) fxch %st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fxch %st(5) fstpl 8(%ecx) fldl 72(%edx) fmull 56(%ebp) faddp %st(0),%st(1) fldl 0(%edx) fmull 40(%ebp) faddp %st(0),%st(6) fldl 16(%edx) fmull 16(%ebp) faddp %st(0),%st(3) fxch %st(4) fadd %st(0),%st(1) fsubrp %st(0),%st(3) fldl 64(%edx) fmull 72(%ebp) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%edx) fmull 32(%ebp) faddp %st(0),%st(6) fldl 24(%edx) fmull 8(%ebp) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(2),%st(0) fldl 72(%edx) fmull 64(%ebp) faddp %st(0),%st(6) fldl 0(%edx) fmull 48(%ebp) faddp %st(0),%st(2) fldl 16(%edx) fmull 24(%ebp) faddp %st(0),%st(7) fldl 32(%edx) fmull 0(%ebp) faddp %st(0),%st(4) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fxch %st(4) fstpl 16(%ecx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl 8(%edx) fmull 40(%ebp) faddp %st(0),%st(5) fldl 24(%edx) fmull 16(%ebp) faddp %st(0),%st(6) fxch %st(3) fadd %st(0),%st(2) fsubrp %st(0),%st(1) fldl 0(%edx) fmull 56(%ebp) faddp %st(0),%st(3) fldl 16(%edx) fmull 32(%ebp) faddp %st(0),%st(4) fldl 32(%edx) fmull 8(%ebp) faddp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(2),%st(0) fldl 8(%edx) fmull 48(%ebp) faddp %st(0),%st(4) fldl 24(%edx) fmull 24(%ebp) faddp %st(0),%st(5) fldl 40(%edx) fmull 0(%ebp) faddp %st(0),%st(6) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fxch %st(1) fstpl 24(%ecx) fldl 16(%edx) fmull 40(%ebp) faddp %st(0),%st(3) fldl 32(%edx) fmull 16(%ebp) faddp %st(0),%st(4) fadd %st(0),%st(4) fsubrp %st(0),%st(1) fstpl 32(%ecx) fldl 24(%edx) fmull 32(%ebp) faddp %st(0),%st(1) fldl 40(%edx) fmull 8(%ebp) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(3),%st(0) fldl 32(%edx) fmull 24(%ebp) faddp %st(0),%st(2) fldl 48(%edx) fmull 0(%ebp) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fldl 40(%edx) fmull 16(%ebp) faddp %st(0),%st(2) fadd %st(0),%st(2) fsubrp %st(0),%st(3) fxch %st(2) fstpl 40(%ecx) fldl 48(%edx) fmull 8(%ebp) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fldl 56(%edx) fmull 0(%ebp) faddp %st(0),%st(3) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fldl 64(%ecx) fldl 72(%ecx) fxch %st(2) fadd %st(0),%st(4) fsubrp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(4),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(0),%st(1) fsubrp %st(0),%st(4) fldl 
crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) faddp %st(0),%st(2) fxch %st(2) fstpl 48(%ecx) fxch %st(2) fstpl 56(%ecx) fstpl 64(%ecx) fstpl 72(%ecx) movl 0(%esp),%ebp add %eax,%esp ret curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/athlon/smult.c000066400000000000000000000053731150631715100261530ustar00rootroot00000000000000#include "crypto_scalarmult.h" #define mult crypto_scalarmult_curve25519_athlon_mult #define square crypto_scalarmult_curve25519_athlon_square void crypto_scalarmult_curve25519_athlon_recip(double out[10],const double z[10]) { double z2[10]; double z9[10]; double z11[10]; double z2_5_0[10]; double z2_10_0[10]; double z2_20_0[10]; double z2_50_0[10]; double z2_100_0[10]; double t0[10]; double t1[10]; int i; /* 2 */ square(z2,z); /* 4 */ square(t1,z2); /* 8 */ square(t0,t1); /* 9 */ mult(z9,t0,z); /* 11 */ mult(z11,z9,z2); /* 22 */ square(t0,z11); /* 2^5 - 2^0 = 31 */ mult(z2_5_0,t0,z9); /* 2^6 - 2^1 */ square(t0,z2_5_0); /* 2^7 - 2^2 */ square(t1,t0); /* 2^8 - 2^3 */ square(t0,t1); /* 2^9 - 2^4 */ square(t1,t0); /* 2^10 - 2^5 */ square(t0,t1); /* 2^10 - 2^0 */ mult(z2_10_0,t0,z2_5_0); /* 2^11 - 2^1 */ square(t0,z2_10_0); /* 2^12 - 2^2 */ square(t1,t0); /* 2^20 - 2^10 */ for (i = 2;i < 10;i += 2) { square(t0,t1); square(t1,t0); } /* 2^20 - 2^0 */ mult(z2_20_0,t1,z2_10_0); /* 2^21 - 2^1 */ square(t0,z2_20_0); /* 2^22 - 2^2 */ square(t1,t0); /* 2^40 - 2^20 */ for (i = 2;i < 20;i += 2) { square(t0,t1); square(t1,t0); } /* 2^40 - 2^0 */ mult(t0,t1,z2_20_0); /* 2^41 - 2^1 */ square(t1,t0); /* 2^42 - 2^2 */ square(t0,t1); /* 2^50 - 2^10 */ for (i = 2;i < 10;i += 2) { square(t1,t0); square(t0,t1); } /* 2^50 - 2^0 */ mult(z2_50_0,t0,z2_10_0); /* 2^51 - 2^1 */ square(t0,z2_50_0); /* 2^52 - 2^2 */ square(t1,t0); /* 2^100 - 2^50 */ for (i = 2;i < 50;i += 2) { square(t0,t1); square(t1,t0); } /* 2^100 - 2^0 */ mult(z2_100_0,t1,z2_50_0); /* 2^101 - 2^1 */ square(t1,z2_100_0); /* 2^102 - 2^2 */ square(t0,t1); /* 2^200 - 2^100 */ for (i = 2;i < 100;i += 2) { square(t1,t0); square(t0,t1); } /* 2^200 - 2^0 */ mult(t1,t0,z2_100_0); /* 2^201 - 2^1 */ square(t0,t1); /* 2^202 - 2^2 */ square(t1,t0); /* 2^250 - 2^50 */ for (i = 2;i < 50;i += 2) { square(t0,t1); square(t1,t0); } /* 2^250 - 2^0 */ mult(t0,t1,z2_50_0); /* 2^251 - 2^1 */ square(t1,t0); /* 2^252 - 2^2 */ square(t0,t1); /* 2^253 - 2^3 */ square(t1,t0); /* 2^254 - 2^4 */ square(t0,t1); /* 2^255 - 2^5 */ square(t1,t0); /* 2^255 - 21 */ mult(out,t1,z11); } int crypto_scalarmult(unsigned char *q, const unsigned char *n, const unsigned char *p) { double work[30]; unsigned char e[32]; int i; for (i = 0;i < 32;++i) e[i] = n[i]; e[0] &= 248; e[31] &= 127; e[31] |= 64; crypto_scalarmult_curve25519_athlon_init(); crypto_scalarmult_curve25519_athlon_todouble(work,p); crypto_scalarmult_curve25519_athlon_mainloop(work,e); crypto_scalarmult_curve25519_athlon_recip(work + 10,work + 10); mult(work + 20,work,work + 10); crypto_scalarmult_curve25519_athlon_fromdouble(q,work + 20); return 0; } curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/athlon/square.s000066400000000000000000000135471150631715100263310ustar00rootroot00000000000000.text .p2align 5 .globl _crypto_scalarmult_curve25519_athlon_square .globl crypto_scalarmult_curve25519_athlon_square _crypto_scalarmult_curve25519_athlon_square: crypto_scalarmult_curve25519_athlon_square: mov %esp,%eax and $31,%eax add $64,%eax sub %eax,%esp movl 8(%esp,%eax),%edx movl 4(%esp,%eax),%ecx fldl 
72(%edx) fmul %st(0),%st(0) fldl 0(%edx) fadd %st(0),%st(0) fldl 8(%edx) fadd %st(0),%st(0) fldl 16(%edx) fadd %st(0),%st(0) fldl 56(%edx) fxch %st(4) fmull crypto_scalarmult_curve25519_athlon_scale fldl 72(%edx) fmul %st(4),%st(0) fldl 64(%edx) fmul %st(4),%st(0) faddp %st(0),%st(1) fxch %st(4) fstl 0(%esp) fxch %st(3) fstl 8(%esp) fxch %st(3) fmull 64(%edx) faddp %st(0),%st(1) fldl 48(%edx) fxch %st(5) fmul %st(0),%st(3) fxch %st(3) faddp %st(0),%st(1) fxch %st(2) fadd %st(0),%st(0) fldl 56(%edx) fmul %st(2),%st(0) faddp %st(0),%st(4) fxch %st(1) fstl 16(%esp) fldl 40(%edx) fxch %st(5) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(3) fadd %st(0),%st(0) fstpl 48(%esp) fldl 24(%edx) fadd %st(0),%st(0) fstl 24(%esp) fldl 48(%edx) fmul %st(1),%st(0) faddp %st(0),%st(4) fmul %st(4),%st(0) faddp %st(0),%st(2) fxch %st(3) fadd %st(0),%st(0) fstpl 40(%esp) fldl 32(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) fldl 8(%esp) fldl 72(%edx) fmul %st(0),%st(1) fldl 16(%esp) fmul %st(0),%st(1) fldl 64(%edx) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(3) fldl 24(%esp) fmul %st(0),%st(1) fxch %st(1) faddp %st(0),%st(2) fldl 32(%edx) fadd %st(0),%st(0) fstl 32(%esp) fmull 40(%edx) faddp %st(0),%st(6) fxch %st(3) faddp %st(0),%st(5) fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(5),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha255 fsubr %st(0),%st(5) fldl 56(%edx) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(3) fldl 32(%esp) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(2) fldl 48(%edx) fmul %st(0),%st(4) fxch %st(4) faddp %st(0),%st(3) fxch %st(3) fmull 40(%esp) faddp %st(0),%st(1) fxch %st(3) fstpl 64(%ecx) fldl 40(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) fxch %st(2) fmull crypto_scalarmult_curve25519_athlon_scale fxch %st(3) fstpl 72(%ecx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 24(%esp) fmull 72(%edx) fldl 0(%edx) fmul %st(0),%st(0) faddp %st(0),%st(2) fldl 32(%esp) fmull 64(%edx) faddp %st(0),%st(1) fldl 0(%esp) fmull 8(%edx) faddp %st(0),%st(3) fldl 40(%esp) fmull 56(%edx) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(2),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fsubr %st(0),%st(2) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(3),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fsubr %st(0),%st(3) fldl 48(%edx) fmul %st(0),%st(0) faddp %st(0),%st(2) fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 16(%edx) faddp %st(0),%st(1) fldl 8(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fsubr %st(0),%st(1) fxch %st(2) fstpl 0(%ecx) fldl 32(%esp) fmull 72(%edx) fldl 40(%esp) fmull 64(%edx) faddp %st(0),%st(1) fldl 48(%esp) fmull 56(%edx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 24(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull 16(%edx) faddp %st(0),%st(1) faddp %st(0),%st(2) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(2),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fsubr %st(0),%st(2) fxch %st(3) fstpl 8(%ecx) fldl 40(%esp) fmull 72(%edx) fldl 48(%esp) fmull 64(%edx) faddp %st(0),%st(1) fldl 56(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale 
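# These athlon routines keep each field element as ten double-precision limbs
# in radix 2^25.5 (limb boundaries at bits 26, 51, 77, ..., 230), small enough
# that the 64-bit x87 mantissa holds every partial product exactly.  The
# recurring "fadd alphaN / fsub alphaN" pairs round a running sum to the limb
# boundary, splitting off the carry without any integer shifts, and the
# multiplies by crypto_scalarmult_curve25519_athlon_scale fold contributions
# above 2^255 back into the low limbs via 2^255 = 19 (mod 2^255 - 19).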
fldl 0(%esp) fmull 32(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull 24(%edx) faddp %st(0),%st(1) fldl 16(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(3) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(3),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fsubr %st(0),%st(3) fxch %st(1) fstpl 16(%ecx) fldl 48(%esp) fldl 72(%edx) fmul %st(0),%st(1) fmul %st(5),%st(0) fxch %st(5) fmull 64(%edx) faddp %st(0),%st(1) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 40(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull 32(%edx) faddp %st(0),%st(1) fldl 16(%esp) fmull 24(%edx) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fsubr %st(0),%st(1) fxch %st(2) fstpl 24(%ecx) fldl 64(%edx) fmul %st(0),%st(0) faddp %st(0),%st(4) fxch %st(3) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 48(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull 40(%edx) faddp %st(0),%st(1) fldl 16(%esp) fmull 32(%edx) faddp %st(0),%st(1) fldl 24(%edx) fmul %st(0),%st(0) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fsubr %st(0),%st(1) fldl 64(%edx) fadd %st(0),%st(0) fmull 72(%edx) fmull crypto_scalarmult_curve25519_athlon_scale fldl 0(%esp) fmull 56(%edx) faddp %st(0),%st(1) fldl 8(%esp) fmull 48(%edx) faddp %st(0),%st(1) fldl 16(%esp) fmull 40(%edx) faddp %st(0),%st(1) fldl 24(%esp) fmull 32(%edx) faddp %st(0),%st(1) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fsubr %st(0),%st(1) fldl 64(%ecx) faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fldl 72(%ecx) fxch %st(1) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(2) faddp %st(0),%st(1) fxch %st(4) fstpl 32(%ecx) fxch %st(4) fstpl 40(%ecx) fxch %st(1) fstpl 48(%ecx) fstpl 56(%ecx) fxch %st(1) fstpl 64(%ecx) fstpl 72(%ecx) add %eax,%esp ret curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/athlon/todouble.s000066400000000000000000000072061150631715100266410ustar00rootroot00000000000000.text .p2align 5 .globl _crypto_scalarmult_curve25519_athlon_todouble .globl crypto_scalarmult_curve25519_athlon_todouble _crypto_scalarmult_curve25519_athlon_todouble: crypto_scalarmult_curve25519_athlon_todouble: mov %esp,%eax and $31,%eax add $96,%eax sub %eax,%esp movl 8(%esp,%eax),%ecx movl 0(%ecx),%edx movl $0x43300000,4(%esp) movl %edx,0(%esp) movl 4(%ecx),%edx and $0xffffff,%edx movl $0x45300000,12(%esp) movl %edx,8(%esp) movl 7(%ecx),%edx and $0xffffff,%edx movl $0x46b00000,20(%esp) movl %edx,16(%esp) movl 10(%ecx),%edx and $0xffffff,%edx movl $0x48300000,28(%esp) movl %edx,24(%esp) movl 13(%ecx),%edx and $0xffffff,%edx movl $0x49b00000,36(%esp) movl %edx,32(%esp) movl 16(%ecx),%edx movl $0x4b300000,44(%esp) movl %edx,40(%esp) movl 20(%ecx),%edx and $0xffffff,%edx movl $0x4d300000,52(%esp) movl %edx,48(%esp) movl 23(%ecx),%edx and $0xffffff,%edx movl $0x4eb00000,60(%esp) movl %edx,56(%esp) movl 26(%ecx),%edx and $0xffffff,%edx movl $0x50300000,68(%esp) movl %edx,64(%esp) movl 28(%ecx),%ecx shr $8,%ecx and $0x7fffff,%ecx movl $0x51b00000,76(%esp) movl %ecx,72(%esp) movl 4(%esp,%eax),%ecx fldl 72(%esp) fsubl crypto_scalarmult_curve25519_athlon_in9offset fldl crypto_scalarmult_curve25519_athlon_alpha255 fadd %st(1),%st(0) fsubl 
crypto_scalarmult_curve25519_athlon_alpha255 fsubr %st(0),%st(1) fldl 0(%esp) fsubl crypto_scalarmult_curve25519_athlon_in0offset fxch %st(1) fmull crypto_scalarmult_curve25519_athlon_scale faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha26 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha26 fsubr %st(0),%st(1) fxch %st(1) fstpl 0(%ecx) fldl 8(%esp) fsubl crypto_scalarmult_curve25519_athlon_in1offset faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha51 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha51 fsubr %st(0),%st(1) fxch %st(1) fstpl 8(%ecx) fldl 16(%esp) fsubl crypto_scalarmult_curve25519_athlon_in2offset faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha77 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha77 fsubr %st(0),%st(1) fxch %st(1) fstpl 16(%ecx) fldl 24(%esp) fsubl crypto_scalarmult_curve25519_athlon_in3offset faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha102 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha102 fsubr %st(0),%st(1) fxch %st(1) fstpl 24(%ecx) fldl 32(%esp) fsubl crypto_scalarmult_curve25519_athlon_in4offset faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha128 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha128 fsubr %st(0),%st(1) fxch %st(1) fstpl 32(%ecx) fldl 40(%esp) fsubl crypto_scalarmult_curve25519_athlon_in5offset faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha153 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha153 fsubr %st(0),%st(1) fxch %st(1) fstpl 40(%ecx) fldl 48(%esp) fsubl crypto_scalarmult_curve25519_athlon_in6offset faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha179 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha179 fsubr %st(0),%st(1) fxch %st(1) fstpl 48(%ecx) fldl 56(%esp) fsubl crypto_scalarmult_curve25519_athlon_in7offset faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha204 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha204 fsubr %st(0),%st(1) fxch %st(1) fstpl 56(%ecx) fldl 64(%esp) fsubl crypto_scalarmult_curve25519_athlon_in8offset faddp %st(0),%st(1) fldl crypto_scalarmult_curve25519_athlon_alpha230 fadd %st(1),%st(0) fsubl crypto_scalarmult_curve25519_athlon_alpha230 fsubr %st(0),%st(1) fxch %st(1) fstpl 64(%ecx) faddp %st(0),%st(1) fstpl 72(%ecx) add %eax,%esp ret curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/checksum000066400000000000000000000001011150631715100250630ustar00rootroot00000000000000dacdae4a0f12353dfc66757f2fd1fff538fe6616115dace9afb8016a55be2a52 curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/donna_c64/000077500000000000000000000000001150631715100251215ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/donna_c64/api.h000066400000000000000000000000661150631715100260450ustar00rootroot00000000000000#define CRYPTO_BYTES 32 #define CRYPTO_SCALARBYTES 32 curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/donna_c64/base.c000066400000000000000000000003061150631715100261760ustar00rootroot00000000000000#include "crypto_scalarmult.h" static const unsigned char basepoint[32] = {9}; int crypto_scalarmult_base(unsigned char *q,const unsigned char *n) { return crypto_scalarmult(q, n, basepoint); } curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/donna_c64/smult.c000066400000000000000000000332141150631715100264340ustar00rootroot00000000000000/* Copyright 2008, Google Inc. 
* All rights reserved. * * Code released into the public domain. * * curve25519-donna: Curve25519 elliptic curve, public key function * * http://code.google.com/p/curve25519-donna/ * * Adam Langley * * Derived from public domain C code by Daniel J. Bernstein * * More information about curve25519 can be found here * http://cr.yp.to/ecdh.html * * djb's sample implementation of curve25519 is written in a special assembly * language called qhasm and uses the floating point registers. * * This is, almost, a clean room reimplementation from the curve25519 paper. It * uses many of the tricks described therein. Only the crecip function is taken * from the sample implementation. */ #include #include #include "crypto_scalarmult.h" typedef uint8_t u8; typedef uint64_t felem; // This is a special gcc mode for 128-bit integers. It's implemented on 64-bit // platforms only as far as I know. typedef unsigned uint128_t __attribute__((mode(TI))); /* Sum two numbers: output += in */ static void fsum(felem *output, const felem *in) { unsigned i; for (i = 0; i < 5; ++i) output[i] += in[i]; } /* Find the difference of two numbers: output = in - output * (note the order of the arguments!) */ static void fdifference_backwards(felem *ioutput, const felem *iin) { static const int64_t twotothe51 = (1l << 51); const int64_t *in = (const int64_t *) iin; int64_t *out = (int64_t *) ioutput; out[0] = in[0] - out[0]; out[1] = in[1] - out[1]; out[2] = in[2] - out[2]; out[3] = in[3] - out[3]; out[4] = in[4] - out[4]; // An arithmetic shift right of 63 places turns a positive number to 0 and a // negative number to all 1's. This gives us a bitmask that lets us avoid // side-channel prone branches. int64_t t; #define NEGCHAIN(a,b) \ t = out[a] >> 63; \ out[a] += twotothe51 & t; \ out[b] -= 1 & t; #define NEGCHAIN19(a,b) \ t = out[a] >> 63; \ out[a] += twotothe51 & t; \ out[b] -= 19 & t; NEGCHAIN(0, 1); NEGCHAIN(1, 2); NEGCHAIN(2, 3); NEGCHAIN(3, 4); NEGCHAIN19(4, 0); NEGCHAIN(0, 1); NEGCHAIN(1, 2); NEGCHAIN(2, 3); NEGCHAIN(3, 4); } /* Multiply a number by a scalar: output = in * scalar */ static void fscalar_product(felem *output, const felem *in, const felem scalar) { uint128_t a; a = ((uint128_t) in[0]) * scalar; output[0] = a & 0x7ffffffffffff; a = ((uint128_t) in[1]) * scalar + (a >> 51); output[1] = a & 0x7ffffffffffff; a = ((uint128_t) in[2]) * scalar + (a >> 51); output[2] = a & 0x7ffffffffffff; a = ((uint128_t) in[3]) * scalar + (a >> 51); output[3] = a & 0x7ffffffffffff; a = ((uint128_t) in[4]) * scalar + (a >> 51); output[4] = a & 0x7ffffffffffff; output[0] += (a >> 51) * 19; } /* Multiply two numbers: output = in2 * in * * output must be distinct to both inputs. The inputs are reduced coefficient * form, the output is not. 
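 *
 * Each felem is five 51-bit limbs (radix 2^51), so the partial products fit
 * comfortably in the 128-bit accumulators used here.  Product terms that land
 * at limb positions 5..8 are folded back into positions 0..3 with a factor of
 * 19, since 2^255 = 19 (mod 2^255 - 19), and a short carry chain then brings
 * each limb back down to roughly 51 bits.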
*/ static void fmul(felem *output, const felem *in2, const felem *in) { uint128_t t[9]; t[0] = ((uint128_t) in[0]) * in2[0]; t[1] = ((uint128_t) in[0]) * in2[1] + ((uint128_t) in[1]) * in2[0]; t[2] = ((uint128_t) in[0]) * in2[2] + ((uint128_t) in[2]) * in2[0] + ((uint128_t) in[1]) * in2[1]; t[3] = ((uint128_t) in[0]) * in2[3] + ((uint128_t) in[3]) * in2[0] + ((uint128_t) in[1]) * in2[2] + ((uint128_t) in[2]) * in2[1]; t[4] = ((uint128_t) in[0]) * in2[4] + ((uint128_t) in[4]) * in2[0] + ((uint128_t) in[3]) * in2[1] + ((uint128_t) in[1]) * in2[3] + ((uint128_t) in[2]) * in2[2]; t[5] = ((uint128_t) in[4]) * in2[1] + ((uint128_t) in[1]) * in2[4] + ((uint128_t) in[2]) * in2[3] + ((uint128_t) in[3]) * in2[2]; t[6] = ((uint128_t) in[4]) * in2[2] + ((uint128_t) in[2]) * in2[4] + ((uint128_t) in[3]) * in2[3]; t[7] = ((uint128_t) in[3]) * in2[4] + ((uint128_t) in[4]) * in2[3]; t[8] = ((uint128_t) in[4]) * in2[4]; t[0] += t[5] * 19; t[1] += t[6] * 19; t[2] += t[7] * 19; t[3] += t[8] * 19; t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff; t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffff; t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffff; t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffff; t[0] += 19 * (t[4] >> 51); t[4] &= 0x7ffffffffffff; t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff; t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffff; output[0] = t[0]; output[1] = t[1]; output[2] = t[2]; output[3] = t[3]; output[4] = t[4]; } static void fsquare(felem *output, const felem *in) { uint128_t t[9]; t[0] = ((uint128_t) in[0]) * in[0]; t[1] = ((uint128_t) in[0]) * in[1] * 2; t[2] = ((uint128_t) in[0]) * in[2] * 2 + ((uint128_t) in[1]) * in[1]; t[3] = ((uint128_t) in[0]) * in[3] * 2 + ((uint128_t) in[1]) * in[2] * 2; t[4] = ((uint128_t) in[0]) * in[4] * 2 + ((uint128_t) in[3]) * in[1] * 2 + ((uint128_t) in[2]) * in[2]; t[5] = ((uint128_t) in[4]) * in[1] * 2 + ((uint128_t) in[2]) * in[3] * 2; t[6] = ((uint128_t) in[4]) * in[2] * 2 + ((uint128_t) in[3]) * in[3]; t[7] = ((uint128_t) in[3]) * in[4] * 2; t[8] = ((uint128_t) in[4]) * in[4]; t[0] += t[5] * 19; t[1] += t[6] * 19; t[2] += t[7] * 19; t[3] += t[8] * 19; t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff; t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffff; t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffff; t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffff; t[0] += 19 * (t[4] >> 51); t[4] &= 0x7ffffffffffff; t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff; output[0] = t[0]; output[1] = t[1]; output[2] = t[2]; output[3] = t[3]; output[4] = t[4]; } /* Take a little-endian, 32-byte number and expand it into polynomial form */ static void fexpand(felem *output, const u8 *in) { output[0] = *((const uint64_t *)(in)) & 0x7ffffffffffff; output[1] = (*((const uint64_t *)(in+6)) >> 3) & 0x7ffffffffffff; output[2] = (*((const uint64_t *)(in+12)) >> 6) & 0x7ffffffffffff; output[3] = (*((const uint64_t *)(in+19)) >> 1) & 0x7ffffffffffff; output[4] = (*((const uint64_t *)(in+25)) >> 4) & 0x7ffffffffffff; } /* Take a fully reduced polynomial form number and contract it into a * little-endian, 32-byte array */ static void fcontract(u8 *output, const felem *input) { uint128_t t[5]; t[0] = input[0]; t[1] = input[1]; t[2] = input[2]; t[3] = input[3]; t[4] = input[4]; t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff; t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffff; t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffff; t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffff; t[0] += 19 * (t[4] >> 51); t[4] &= 0x7ffffffffffff; t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff; t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffff; t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffff; t[4] 
+= t[3] >> 51; t[3] &= 0x7ffffffffffff; t[0] += 19 * (t[4] >> 51); t[4] &= 0x7ffffffffffff; /* now t is between 0 and 2^255-1, properly carried. */ /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */ t[0] += 19; t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff; t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffff; t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffff; t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffff; t[0] += 19 * (t[4] >> 51); t[4] &= 0x7ffffffffffff; /* now between 19 and 2^255-1 in both cases, and offset by 19. */ t[0] += 0x8000000000000 - 19; t[1] += 0x8000000000000 - 1; t[2] += 0x8000000000000 - 1; t[3] += 0x8000000000000 - 1; t[4] += 0x8000000000000 - 1; /* now between 2^255 and 2^256-20, and offset by 2^255. */ t[1] += t[0] >> 51; t[0] &= 0x7ffffffffffff; t[2] += t[1] >> 51; t[1] &= 0x7ffffffffffff; t[3] += t[2] >> 51; t[2] &= 0x7ffffffffffff; t[4] += t[3] >> 51; t[3] &= 0x7ffffffffffff; t[4] &= 0x7ffffffffffff; *((uint64_t *)(output)) = t[0] | (t[1] << 51); *((uint64_t *)(output+8)) = (t[1] >> 13) | (t[2] << 38); *((uint64_t *)(output+16)) = (t[2] >> 26) | (t[3] << 25); *((uint64_t *)(output+24)) = (t[3] >> 39) | (t[4] << 12); } /* Input: Q, Q', Q-Q' * Output: 2Q, Q+Q' * * x2 z3: long form * x3 z3: long form * x z: short form, destroyed * xprime zprime: short form, destroyed * qmqp: short form, preserved */ static void fmonty(felem *x2, felem *z2, /* output 2Q */ felem *x3, felem *z3, /* output Q + Q' */ felem *x, felem *z, /* input Q */ felem *xprime, felem *zprime, /* input Q' */ const felem *qmqp /* input Q - Q' */) { felem origx[5], origxprime[5], zzz[5], xx[5], zz[5], xxprime[5], zzprime[5], zzzprime[5]; memcpy(origx, x, 5 * sizeof(felem)); fsum(x, z); fdifference_backwards(z, origx); // does x - z memcpy(origxprime, xprime, sizeof(felem) * 5); fsum(xprime, zprime); fdifference_backwards(zprime, origxprime); fmul(xxprime, xprime, z); fmul(zzprime, x, zprime); memcpy(origxprime, xxprime, sizeof(felem) * 5); fsum(xxprime, zzprime); fdifference_backwards(zzprime, origxprime); fsquare(x3, xxprime); fsquare(zzzprime, zzprime); fmul(z3, zzzprime, qmqp); fsquare(xx, x); fsquare(zz, z); fmul(x2, xx, zz); fdifference_backwards(zz, xx); // does zz = xx - zz fscalar_product(zzz, zz, 121665); fsum(zzz, xx); fmul(z2, zz, zzz); } // ----------------------------------------------------------------------------- // Maybe swap the contents of two felem arrays (@a and @b), each @len elements // long. Perform the swap iff @swap is non-zero. // // This function performs the swap without leaking any side-channel // information. 
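//
// The implementation below relies on "swap = -iswap": for iswap == 1 this is
// an all-ones mask and for iswap == 0 it is zero, so "swap & (a[i] ^ b[i])"
// is either the full difference or nothing, and the pair of XORs either
// exchanges the limbs or leaves them alone.  The instruction sequence and the
// memory accesses are identical in both cases, so the secret ladder bit never
// shows up in branches or timing.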
// ----------------------------------------------------------------------------- static void swap_conditional(felem *a, felem *b, unsigned len, felem iswap) { unsigned i; const felem swap = -iswap; for (i = 0; i < len; ++i) { const felem x = swap & (a[i] ^ b[i]); a[i] ^= x; b[i] ^= x; } } /* Calculates nQ where Q is the x-coordinate of a point on the curve * * resultx/resultz: the x coordinate of the resulting curve point (short form) * n: a little endian, 32-byte number * q: a point of the curve (short form) */ static void cmult(felem *resultx, felem *resultz, const u8 *n, const felem *q) { felem a[5] = {0}, b[5] = {1}, c[5] = {1}, d[5] = {0}; felem *nqpqx = a, *nqpqz = b, *nqx = c, *nqz = d, *t; felem e[5] = {0}, f[5] = {1}, g[5] = {0}, h[5] = {1}; felem *nqpqx2 = e, *nqpqz2 = f, *nqx2 = g, *nqz2 = h; unsigned i, j; memcpy(nqpqx, q, sizeof(felem) * 5); for (i = 0; i < 32; ++i) { u8 byte = n[31 - i]; for (j = 0; j < 8; ++j) { const felem bit = byte >> 7; swap_conditional(nqx, nqpqx, 5, bit); swap_conditional(nqz, nqpqz, 5, bit); fmonty(nqx2, nqz2, nqpqx2, nqpqz2, nqx, nqz, nqpqx, nqpqz, q); swap_conditional(nqx2, nqpqx2, 5, bit); swap_conditional(nqz2, nqpqz2, 5, bit); t = nqx; nqx = nqx2; nqx2 = t; t = nqz; nqz = nqz2; nqz2 = t; t = nqpqx; nqpqx = nqpqx2; nqpqx2 = t; t = nqpqz; nqpqz = nqpqz2; nqpqz2 = t; byte <<= 1; } } memcpy(resultx, nqx, sizeof(felem) * 5); memcpy(resultz, nqz, sizeof(felem) * 5); } // ----------------------------------------------------------------------------- // Shamelessly copied from djb's code // ----------------------------------------------------------------------------- static void crecip(felem *out, const felem *z) { felem z2[5]; felem z9[5]; felem z11[5]; felem z2_5_0[5]; felem z2_10_0[5]; felem z2_20_0[5]; felem z2_50_0[5]; felem z2_100_0[5]; felem t0[5]; felem t1[5]; int i; /* 2 */ fsquare(z2,z); /* 4 */ fsquare(t1,z2); /* 8 */ fsquare(t0,t1); /* 9 */ fmul(z9,t0,z); /* 11 */ fmul(z11,z9,z2); /* 22 */ fsquare(t0,z11); /* 2^5 - 2^0 = 31 */ fmul(z2_5_0,t0,z9); /* 2^6 - 2^1 */ fsquare(t0,z2_5_0); /* 2^7 - 2^2 */ fsquare(t1,t0); /* 2^8 - 2^3 */ fsquare(t0,t1); /* 2^9 - 2^4 */ fsquare(t1,t0); /* 2^10 - 2^5 */ fsquare(t0,t1); /* 2^10 - 2^0 */ fmul(z2_10_0,t0,z2_5_0); /* 2^11 - 2^1 */ fsquare(t0,z2_10_0); /* 2^12 - 2^2 */ fsquare(t1,t0); /* 2^20 - 2^10 */ for (i = 2;i < 10;i += 2) { fsquare(t0,t1); fsquare(t1,t0); } /* 2^20 - 2^0 */ fmul(z2_20_0,t1,z2_10_0); /* 2^21 - 2^1 */ fsquare(t0,z2_20_0); /* 2^22 - 2^2 */ fsquare(t1,t0); /* 2^40 - 2^20 */ for (i = 2;i < 20;i += 2) { fsquare(t0,t1); fsquare(t1,t0); } /* 2^40 - 2^0 */ fmul(t0,t1,z2_20_0); /* 2^41 - 2^1 */ fsquare(t1,t0); /* 2^42 - 2^2 */ fsquare(t0,t1); /* 2^50 - 2^10 */ for (i = 2;i < 10;i += 2) { fsquare(t1,t0); fsquare(t0,t1); } /* 2^50 - 2^0 */ fmul(z2_50_0,t0,z2_10_0); /* 2^51 - 2^1 */ fsquare(t0,z2_50_0); /* 2^52 - 2^2 */ fsquare(t1,t0); /* 2^100 - 2^50 */ for (i = 2;i < 50;i += 2) { fsquare(t0,t1); fsquare(t1,t0); } /* 2^100 - 2^0 */ fmul(z2_100_0,t1,z2_50_0); /* 2^101 - 2^1 */ fsquare(t1,z2_100_0); /* 2^102 - 2^2 */ fsquare(t0,t1); /* 2^200 - 2^100 */ for (i = 2;i < 100;i += 2) { fsquare(t1,t0); fsquare(t0,t1); } /* 2^200 - 2^0 */ fmul(t1,t0,z2_100_0); /* 2^201 - 2^1 */ fsquare(t0,t1); /* 2^202 - 2^2 */ fsquare(t1,t0); /* 2^250 - 2^50 */ for (i = 2;i < 50;i += 2) { fsquare(t0,t1); fsquare(t1,t0); } /* 2^250 - 2^0 */ fmul(t0,t1,z2_50_0); /* 2^251 - 2^1 */ fsquare(t1,t0); /* 2^252 - 2^2 */ fsquare(t0,t1); /* 2^253 - 2^3 */ fsquare(t1,t0); /* 2^254 - 2^4 */ fsquare(t0,t1); /* 2^255 - 2^5 */ 
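/* This fixed chain of squarings and multiplications raises z to the power
   2^255 - 21 = p - 2 with p = 2^255 - 19, i.e. it computes 1/z by Fermat's
   little theorem, in constant time.  The caller multiplies the ladder's
   projective X coordinate by this inverse of Z to recover the affine
   x-coordinate. */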
fsquare(t1,t0); /* 2^255 - 21 */ fmul(out,t1,z11); } int crypto_scalarmult(u8 *mypublic, const u8 *secret, const u8 *basepoint) { felem bp[5], x[5], z[5], zmone[5]; unsigned char e[32]; int i; for (i = 0;i < 32;++i) e[i] = secret[i]; e[0] &= 248; e[31] &= 127; e[31] |= 64; fexpand(bp, basepoint); cmult(x, z, e, bp); crecip(zmone, z); fmul(z, x, zmone); fcontract(mypublic, z); return 0; } curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/ref/000077500000000000000000000000001150631715100241225ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/ref/api.h000066400000000000000000000000661150631715100250460ustar00rootroot00000000000000#define CRYPTO_BYTES 32 #define CRYPTO_SCALARBYTES 32 curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/ref/base.c000066400000000000000000000004411150631715100251770ustar00rootroot00000000000000/* version 20081011 Matthew Dempsky Public domain. Derived from public domain code by D. J. Bernstein. */ #include "crypto_scalarmult.h" const unsigned char base[32] = {9}; int crypto_scalarmult_base(unsigned char *q, const unsigned char *n) { return crypto_scalarmult(q,n,base); } curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/ref/smult.c000066400000000000000000000151131150631715100254330ustar00rootroot00000000000000/* version 20081011 Matthew Dempsky Public domain. Derived from public domain code by D. J. Bernstein. */ #include "crypto_scalarmult.h" static void add(unsigned int out[32],const unsigned int a[32],const unsigned int b[32]) { unsigned int j; unsigned int u; u = 0; for (j = 0;j < 31;++j) { u += a[j] + b[j]; out[j] = u & 255; u >>= 8; } u += a[31] + b[31]; out[31] = u; } static void sub(unsigned int out[32],const unsigned int a[32],const unsigned int b[32]) { unsigned int j; unsigned int u; u = 218; for (j = 0;j < 31;++j) { u += a[j] + 65280 - b[j]; out[j] = u & 255; u >>= 8; } u += a[31] - b[31]; out[31] = u; } static void squeeze(unsigned int a[32]) { unsigned int j; unsigned int u; u = 0; for (j = 0;j < 31;++j) { u += a[j]; a[j] = u & 255; u >>= 8; } u += a[31]; a[31] = u & 127; u = 19 * (u >> 7); for (j = 0;j < 31;++j) { u += a[j]; a[j] = u & 255; u >>= 8; } u += a[31]; a[31] = u; } static const unsigned int minusp[32] = { 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128 } ; static void freeze(unsigned int a[32]) { unsigned int aorig[32]; unsigned int j; unsigned int negative; for (j = 0;j < 32;++j) aorig[j] = a[j]; add(a,a,minusp); negative = -((a[31] >> 7) & 1); for (j = 0;j < 32;++j) a[j] ^= negative & (aorig[j] ^ a[j]); } static void mult(unsigned int out[32],const unsigned int a[32],const unsigned int b[32]) { unsigned int i; unsigned int j; unsigned int u; for (i = 0;i < 32;++i) { u = 0; for (j = 0;j <= i;++j) u += a[j] * b[i - j]; for (j = i + 1;j < 32;++j) u += 38 * a[j] * b[i + 32 - j]; out[i] = u; } squeeze(out); } static void mult121665(unsigned int out[32],const unsigned int a[32]) { unsigned int j; unsigned int u; u = 0; for (j = 0;j < 31;++j) { u += 121665 * a[j]; out[j] = u & 255; u >>= 8; } u += 121665 * a[31]; out[31] = u & 127; u = 19 * (u >> 7); for (j = 0;j < 31;++j) { u += out[j]; out[j] = u & 255; u >>= 8; } u += out[j]; out[j] = u; } static void square(unsigned int out[32],const unsigned int a[32]) { unsigned int i; unsigned int j; unsigned int u; for (i = 0;i < 32;++i) { u = 0; for (j = 0;j < i - j;++j) u += a[j] * a[i - j]; for (j = i + 1;j < i + 32 - j;++j) u += 38 * a[j] * a[i + 32 - j]; u *= 2; if ((i & 1) == 0) { u += a[i / 2] 
* a[i / 2]; u += 38 * a[i / 2 + 16] * a[i / 2 + 16]; } out[i] = u; } squeeze(out); } static void select(unsigned int p[64],unsigned int q[64],const unsigned int r[64],const unsigned int s[64],unsigned int b) { unsigned int j; unsigned int t; unsigned int bminus1; bminus1 = b - 1; for (j = 0;j < 64;++j) { t = bminus1 & (r[j] ^ s[j]); p[j] = s[j] ^ t; q[j] = r[j] ^ t; } } static void mainloop(unsigned int work[64],const unsigned char e[32]) { unsigned int xzm1[64]; unsigned int xzm[64]; unsigned int xzmb[64]; unsigned int xzm1b[64]; unsigned int xznb[64]; unsigned int xzn1b[64]; unsigned int a0[64]; unsigned int a1[64]; unsigned int b0[64]; unsigned int b1[64]; unsigned int c1[64]; unsigned int r[32]; unsigned int s[32]; unsigned int t[32]; unsigned int u[32]; unsigned int i; unsigned int j; unsigned int b; int pos; for (j = 0;j < 32;++j) xzm1[j] = work[j]; xzm1[32] = 1; for (j = 33;j < 64;++j) xzm1[j] = 0; xzm[0] = 1; for (j = 1;j < 64;++j) xzm[j] = 0; for (pos = 254;pos >= 0;--pos) { b = e[pos / 8] >> (pos & 7); b &= 1; select(xzmb,xzm1b,xzm,xzm1,b); add(a0,xzmb,xzmb + 32); sub(a0 + 32,xzmb,xzmb + 32); add(a1,xzm1b,xzm1b + 32); sub(a1 + 32,xzm1b,xzm1b + 32); square(b0,a0); square(b0 + 32,a0 + 32); mult(b1,a1,a0 + 32); mult(b1 + 32,a1 + 32,a0); add(c1,b1,b1 + 32); sub(c1 + 32,b1,b1 + 32); square(r,c1 + 32); sub(s,b0,b0 + 32); mult121665(t,s); add(u,t,b0); mult(xznb,b0,b0 + 32); mult(xznb + 32,s,u); square(xzn1b,c1); mult(xzn1b + 32,r,work); select(xzm,xzm1,xznb,xzn1b,b); } for (j = 0;j < 64;++j) work[j] = xzm[j]; } static void recip(unsigned int out[32],const unsigned int z[32]) { unsigned int z2[32]; unsigned int z9[32]; unsigned int z11[32]; unsigned int z2_5_0[32]; unsigned int z2_10_0[32]; unsigned int z2_20_0[32]; unsigned int z2_50_0[32]; unsigned int z2_100_0[32]; unsigned int t0[32]; unsigned int t1[32]; int i; /* 2 */ square(z2,z); /* 4 */ square(t1,z2); /* 8 */ square(t0,t1); /* 9 */ mult(z9,t0,z); /* 11 */ mult(z11,z9,z2); /* 22 */ square(t0,z11); /* 2^5 - 2^0 = 31 */ mult(z2_5_0,t0,z9); /* 2^6 - 2^1 */ square(t0,z2_5_0); /* 2^7 - 2^2 */ square(t1,t0); /* 2^8 - 2^3 */ square(t0,t1); /* 2^9 - 2^4 */ square(t1,t0); /* 2^10 - 2^5 */ square(t0,t1); /* 2^10 - 2^0 */ mult(z2_10_0,t0,z2_5_0); /* 2^11 - 2^1 */ square(t0,z2_10_0); /* 2^12 - 2^2 */ square(t1,t0); /* 2^20 - 2^10 */ for (i = 2;i < 10;i += 2) { square(t0,t1); square(t1,t0); } /* 2^20 - 2^0 */ mult(z2_20_0,t1,z2_10_0); /* 2^21 - 2^1 */ square(t0,z2_20_0); /* 2^22 - 2^2 */ square(t1,t0); /* 2^40 - 2^20 */ for (i = 2;i < 20;i += 2) { square(t0,t1); square(t1,t0); } /* 2^40 - 2^0 */ mult(t0,t1,z2_20_0); /* 2^41 - 2^1 */ square(t1,t0); /* 2^42 - 2^2 */ square(t0,t1); /* 2^50 - 2^10 */ for (i = 2;i < 10;i += 2) { square(t1,t0); square(t0,t1); } /* 2^50 - 2^0 */ mult(z2_50_0,t0,z2_10_0); /* 2^51 - 2^1 */ square(t0,z2_50_0); /* 2^52 - 2^2 */ square(t1,t0); /* 2^100 - 2^50 */ for (i = 2;i < 50;i += 2) { square(t0,t1); square(t1,t0); } /* 2^100 - 2^0 */ mult(z2_100_0,t1,z2_50_0); /* 2^101 - 2^1 */ square(t1,z2_100_0); /* 2^102 - 2^2 */ square(t0,t1); /* 2^200 - 2^100 */ for (i = 2;i < 100;i += 2) { square(t1,t0); square(t0,t1); } /* 2^200 - 2^0 */ mult(t1,t0,z2_100_0); /* 2^201 - 2^1 */ square(t0,t1); /* 2^202 - 2^2 */ square(t1,t0); /* 2^250 - 2^50 */ for (i = 2;i < 50;i += 2) { square(t0,t1); square(t1,t0); } /* 2^250 - 2^0 */ mult(t0,t1,z2_50_0); /* 2^251 - 2^1 */ square(t1,t0); /* 2^252 - 2^2 */ square(t0,t1); /* 2^253 - 2^3 */ square(t1,t0); /* 2^254 - 2^4 */ square(t0,t1); /* 2^255 - 2^5 */ square(t1,t0); /* 2^255 - 21 */ 
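/* Portable reference version of the same inversion chain: z^(2^255 - 21) is
   1/z modulo p = 2^255 - 19.  Field elements here are 32 byte-sized limbs held
   in unsigned ints and multiplied schoolbook-style; the factor 38 in mult()
   and square() is the reduction of 2^256, since 2^256 = 2 * 19 = 38 (mod p). */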
mult(out,t1,z11); } int crypto_scalarmult(unsigned char *q, const unsigned char *n, const unsigned char *p) { unsigned int work[96]; unsigned char e[32]; unsigned int i; for (i = 0;i < 32;++i) e[i] = n[i]; e[0] &= 248; e[31] &= 127; e[31] |= 64; for (i = 0;i < 32;++i) work[i] = p[i]; mainloop(work,e); recip(work + 32,work + 32); mult(work + 64,work,work + 32); freeze(work + 64); for (i = 0;i < 32;++i) q[i] = work[64 + i]; return 0; } curvedns-curvedns-0.87/nacl/crypto_scalarmult/curve25519/used000066400000000000000000000000001150631715100242170ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_scalarmult/measure.c000066400000000000000000000033401150631715100234210ustar00rootroot00000000000000#include #include "randombytes.h" #include "cpucycles.h" #include "crypto_scalarmult.h" extern void printentry(long long,const char *,long long *,long long); extern unsigned char *alignedcalloc(unsigned long long); extern const char *primitiveimplementation; extern const char *implementationversion; extern const char *sizenames[]; extern const long long sizes[]; extern void allocate(void); extern void measure(void); const char *primitiveimplementation = crypto_scalarmult_IMPLEMENTATION; const char *implementationversion = crypto_scalarmult_VERSION; const char *sizenames[] = { "outputbytes", "scalarbytes", 0 }; const long long sizes[] = { crypto_scalarmult_BYTES, crypto_scalarmult_SCALARBYTES }; static unsigned char *m; static unsigned char *n; static unsigned char *p; static unsigned char *q; void preallocate(void) { } void allocate(void) { m = alignedcalloc(crypto_scalarmult_SCALARBYTES); n = alignedcalloc(crypto_scalarmult_SCALARBYTES); p = alignedcalloc(crypto_scalarmult_BYTES); q = alignedcalloc(crypto_scalarmult_BYTES); } #define TIMINGS 63 static long long cycles[TIMINGS + 1]; void measure(void) { int i; int loop; for (loop = 0;loop < LOOPS;++loop) { randombytes(m,crypto_scalarmult_SCALARBYTES); randombytes(n,crypto_scalarmult_SCALARBYTES); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_scalarmult_base(p,m); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(-1,"base_cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_scalarmult(q,n,p); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(-1,"cycles",cycles,TIMINGS); } } curvedns-curvedns-0.87/nacl/crypto_scalarmult/try.c000066400000000000000000000104121150631715100225740ustar00rootroot00000000000000/* * crypto_scalarmult/try.c version 20090118 * D. J. Bernstein * Public domain. 
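 *
 * Part of the NaCl test framework: checksum_compute() below exercises
 * crypto_scalarmult and crypto_scalarmult_base, verifies that neither call
 * writes outside its output buffer or modifies its inputs, that calls with
 * overlapping input/output buffers still give the right result, and that
 * m*(n*P) equals n*(m*P); the iterated results are reduced to a hex string
 * that the build scripts compare against the recorded checksum for this
 * primitive.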
*/ #include #include "crypto_scalarmult.h" extern unsigned char *alignedcalloc(unsigned long long); const char *primitiveimplementation = crypto_scalarmult_IMPLEMENTATION; #define mlen crypto_scalarmult_SCALARBYTES #define nlen crypto_scalarmult_SCALARBYTES #define plen crypto_scalarmult_BYTES #define qlen crypto_scalarmult_BYTES #define rlen crypto_scalarmult_BYTES static unsigned char *m; static unsigned char *n; static unsigned char *p; static unsigned char *q; static unsigned char *r; static unsigned char *m2; static unsigned char *n2; static unsigned char *p2; static unsigned char *q2; static unsigned char *r2; void preallocate(void) { } void allocate(void) { m = alignedcalloc(mlen); n = alignedcalloc(nlen); p = alignedcalloc(plen); q = alignedcalloc(qlen); r = alignedcalloc(rlen); m2 = alignedcalloc(mlen + crypto_scalarmult_BYTES); n2 = alignedcalloc(nlen + crypto_scalarmult_BYTES); p2 = alignedcalloc(plen + crypto_scalarmult_BYTES); q2 = alignedcalloc(qlen + crypto_scalarmult_BYTES); r2 = alignedcalloc(rlen + crypto_scalarmult_BYTES); } void predoit(void) { } void doit(void) { crypto_scalarmult(q,n,p); crypto_scalarmult_base(r,n); } char checksum[crypto_scalarmult_BYTES * 2 + 1]; const char *checksum_compute(void) { long long i; long long j; long long tests; for (i = 0;i < mlen;++i) m[i] = i; for (i = 0;i < nlen;++i) n[i] = i + 1; for (i = 0;i < plen;++i) p[i] = i + 2; for (i = 0;i < qlen;++i) q[i] = i + 3; for (i = 0;i < rlen;++i) r[i] = i + 4; for (i = -16;i < 0;++i) p[i] = random(); for (i = -16;i < 0;++i) n[i] = random(); for (i = plen;i < plen + 16;++i) p[i] = random(); for (i = nlen;i < nlen + 16;++i) n[i] = random(); for (i = -16;i < plen + 16;++i) p2[i] = p[i]; for (i = -16;i < nlen + 16;++i) n2[i] = n[i]; if (crypto_scalarmult_base(p,n) != 0) return "crypto_scalarmult_base returns nonzero"; for (i = -16;i < nlen + 16;++i) if (n2[i] != n[i]) return "crypto_scalarmult_base overwrites input"; for (i = -16;i < 0;++i) if (p2[i] != p[i]) return "crypto_scalarmult_base writes before output"; for (i = plen;i < plen + 16;++i) if (p2[i] != p[i]) return "crypto_scalarmult_base writes after output"; for (tests = 0;tests < 100;++tests) { for (i = -16;i < 0;++i) q[i] = random(); for (i = -16;i < 0;++i) p[i] = random(); for (i = -16;i < 0;++i) m[i] = random(); for (i = qlen;i < qlen + 16;++i) q[i] = random(); for (i = plen;i < plen + 16;++i) p[i] = random(); for (i = mlen;i < mlen + 16;++i) m[i] = random(); for (i = -16;i < qlen + 16;++i) q2[i] = q[i]; for (i = -16;i < plen + 16;++i) p2[i] = p[i]; for (i = -16;i < mlen + 16;++i) m2[i] = m[i]; if (crypto_scalarmult(q,m,p) != 0) return "crypto_scalarmult returns nonzero"; for (i = -16;i < mlen + 16;++i) if (m2[i] != m[i]) return "crypto_scalarmult overwrites n input"; for (i = -16;i < plen + 16;++i) if (p2[i] != p[i]) return "crypto_scalarmult overwrites p input"; for (i = -16;i < 0;++i) if (q2[i] != q[i]) return "crypto_scalarmult writes before output"; for (i = qlen;i < qlen + 16;++i) if (q2[i] != q[i]) return "crypto_scalarmult writes after output"; if (crypto_scalarmult(m2,m2,p) != 0) return "crypto_scalarmult returns nonzero"; for (i = 0;i < qlen;++i) if (q[i] != m2[i]) return "crypto_scalarmult does not handle n overlap"; for (i = 0;i < qlen;++i) m2[i] = m[i]; if (crypto_scalarmult(p2,m2,p2) != 0) return "crypto_scalarmult returns nonzero"; for (i = 0;i < qlen;++i) if (q[i] != p2[i]) return "crypto_scalarmult does not handle p overlap"; if (crypto_scalarmult(r,n,q) != 0) return "crypto_scalarmult returns nonzero"; if 
(crypto_scalarmult(q,n,p) != 0) return "crypto_scalarmult returns nonzero"; if (crypto_scalarmult(p,m,q) != 0) return "crypto_scalarmult returns nonzero"; for (j = 0;j < plen;++j) if (p[j] != r[j]) return "crypto_scalarmult not associative"; for (j = 0;j < mlen;++j) m[j] ^= q[j % qlen]; for (j = 0;j < nlen;++j) n[j] ^= p[j % plen]; } for (i = 0;i < crypto_scalarmult_BYTES;++i) { checksum[2 * i] = "0123456789abcdef"[15 & (p[i] >> 4)]; checksum[2 * i + 1] = "0123456789abcdef"[15 & p[i]]; } checksum[2 * i] = 0; return 0; } curvedns-curvedns-0.87/nacl/crypto_scalarmult/wrapper-base.cpp000066400000000000000000000005331150631715100247110ustar00rootroot00000000000000#include using std::string; #include "crypto_scalarmult.h" string crypto_scalarmult_base(const string &n) { unsigned char q[crypto_scalarmult_BYTES]; if (n.size() != crypto_scalarmult_SCALARBYTES) throw "incorrect scalar length"; crypto_scalarmult_base(q,(const unsigned char *) n.c_str()); return string((char *) q,sizeof q); } curvedns-curvedns-0.87/nacl/crypto_scalarmult/wrapper-mult.cpp000066400000000000000000000007201150631715100247560ustar00rootroot00000000000000#include using std::string; #include "crypto_scalarmult.h" string crypto_scalarmult(const string &n,const string &p) { unsigned char q[crypto_scalarmult_BYTES]; if (n.size() != crypto_scalarmult_SCALARBYTES) throw "incorrect scalar length"; if (p.size() != crypto_scalarmult_BYTES) throw "incorrect element length"; crypto_scalarmult(q,(const unsigned char *) n.c_str(),(const unsigned char *) p.c_str()); return string((char *) q,sizeof q); } curvedns-curvedns-0.87/nacl/crypto_secretbox/000077500000000000000000000000001150631715100214435ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_secretbox/measure.c000066400000000000000000000047011150631715100232520ustar00rootroot00000000000000#include #include "randombytes.h" #include "cpucycles.h" #include "crypto_secretbox.h" extern void printentry(long long,const char *,long long *,long long); extern unsigned char *alignedcalloc(unsigned long long); extern const char *primitiveimplementation; extern const char *implementationversion; extern const char *sizenames[]; extern const long long sizes[]; extern void allocate(void); extern void measure(void); const char *primitiveimplementation = crypto_secretbox_IMPLEMENTATION; const char *implementationversion = crypto_secretbox_VERSION; const char *sizenames[] = { "keybytes", "noncebytes", "zerobytes", "boxzerobytes", 0 }; const long long sizes[] = { crypto_secretbox_KEYBYTES, crypto_secretbox_NONCEBYTES, crypto_secretbox_ZEROBYTES, crypto_secretbox_BOXZEROBYTES }; #define MAXTEST_BYTES 4096 static unsigned char *k; static unsigned char *n; static unsigned char *m; static unsigned char *c; void preallocate(void) { } void allocate(void) { k = alignedcalloc(crypto_secretbox_KEYBYTES); n = alignedcalloc(crypto_secretbox_NONCEBYTES); m = alignedcalloc(MAXTEST_BYTES + crypto_secretbox_ZEROBYTES); c = alignedcalloc(MAXTEST_BYTES + crypto_secretbox_ZEROBYTES); } #define TIMINGS 15 static long long cycles[TIMINGS + 1]; void measure(void) { int i; int loop; int mlen; for (loop = 0;loop < LOOPS;++loop) { for (mlen = 0;mlen <= MAXTEST_BYTES;mlen += 1 + mlen / 8) { randombytes(k,crypto_secretbox_KEYBYTES); randombytes(n,crypto_secretbox_NONCEBYTES); randombytes(m + crypto_secretbox_ZEROBYTES,mlen); randombytes(c,mlen + crypto_secretbox_ZEROBYTES); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_secretbox(c,m,mlen + crypto_secretbox_ZEROBYTES,n,k); } for (i = 0;i < 
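/*
 * Measurement pattern shared by these measure.c drivers: cpucycles() is read
 * immediately before each of TIMINGS back-to-back calls, plus once after the
 * last one, and the loop continuing below replaces the TIMINGS+1 raw
 * timestamps with adjacent differences, so cycles[i] = t[i+1] - t[i] is the
 * cost of the i-th call (timer overhead included).  printentry(), supplied by
 * the benchmarking harness, presumably summarizes those samples for the
 * current message length mlen.
 */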
TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_secretbox_open(m,c,mlen + crypto_secretbox_ZEROBYTES,n,k); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"open_cycles",cycles,TIMINGS); ++c[crypto_secretbox_ZEROBYTES]; for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_secretbox_open(m,c,mlen + crypto_secretbox_ZEROBYTES,n,k); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"forgery_open_cycles",cycles,TIMINGS); } } } curvedns-curvedns-0.87/nacl/crypto_secretbox/try.c000066400000000000000000000111201150631715100224200ustar00rootroot00000000000000/* * crypto_secretbox/try.c version 20090118 * D. J. Bernstein * Public domain. */ #include "crypto_secretbox.h" extern unsigned char *alignedcalloc(unsigned long long); const char *primitiveimplementation = crypto_secretbox_IMPLEMENTATION; #define MAXTEST_BYTES 10000 #define CHECKSUM_BYTES 4096 #define TUNE_BYTES 1536 static unsigned char *k; static unsigned char *n; static unsigned char *m; static unsigned char *c; static unsigned char *t; static unsigned char *k2; static unsigned char *n2; static unsigned char *m2; static unsigned char *c2; static unsigned char *t2; #define klen crypto_secretbox_KEYBYTES #define nlen crypto_secretbox_NONCEBYTES void preallocate(void) { } void allocate(void) { k = alignedcalloc(klen); n = alignedcalloc(nlen); m = alignedcalloc(MAXTEST_BYTES + crypto_secretbox_ZEROBYTES); c = alignedcalloc(MAXTEST_BYTES + crypto_secretbox_ZEROBYTES); t = alignedcalloc(MAXTEST_BYTES + crypto_secretbox_ZEROBYTES); k2 = alignedcalloc(klen); n2 = alignedcalloc(nlen); m2 = alignedcalloc(MAXTEST_BYTES + crypto_secretbox_ZEROBYTES); c2 = alignedcalloc(MAXTEST_BYTES + crypto_secretbox_ZEROBYTES); t2 = alignedcalloc(MAXTEST_BYTES + crypto_secretbox_ZEROBYTES); } void predoit(void) { } void doit(void) { crypto_secretbox(c,m,TUNE_BYTES + crypto_secretbox_ZEROBYTES,n,k); crypto_secretbox_open(t,c,TUNE_BYTES + crypto_secretbox_ZEROBYTES,n,k); } char checksum[klen * 2 + 1]; const char *checksum_compute(void) { long long i; long long j; for (j = 0;j < crypto_secretbox_ZEROBYTES;++j) m[j] = 0; for (i = 0;i < CHECKSUM_BYTES;++i) { long long mlen = i + crypto_secretbox_ZEROBYTES; long long tlen = i + crypto_secretbox_ZEROBYTES; long long clen = i + crypto_secretbox_ZEROBYTES; for (j = -16;j < 0;++j) k[j] = random(); for (j = -16;j < 0;++j) n[j] = random(); for (j = -16;j < 0;++j) m[j] = random(); for (j = klen;j < klen + 16;++j) k[j] = random(); for (j = nlen;j < nlen + 16;++j) n[j] = random(); for (j = mlen;j < mlen + 16;++j) m[j] = random(); for (j = -16;j < klen + 16;++j) k2[j] = k[j]; for (j = -16;j < nlen + 16;++j) n2[j] = n[j]; for (j = -16;j < mlen + 16;++j) m2[j] = m[j]; for (j = -16;j < clen + 16;++j) c2[j] = c[j] = random(); if (crypto_secretbox(c,m,mlen,n,k) != 0) return "crypto_secretbox returns nonzero"; for (j = -16;j < mlen + 16;++j) if (m2[j] != m[j]) return "crypto_secretbox overwrites m"; for (j = -16;j < nlen + 16;++j) if (n2[j] != n[j]) return "crypto_secretbox overwrites n"; for (j = -16;j < klen + 16;++j) if (k2[j] != k[j]) return "crypto_secretbox overwrites k"; for (j = -16;j < 0;++j) if (c2[j] != c[j]) return "crypto_secretbox writes before output"; for (j = clen;j < clen + 16;++j) if (c2[j] != c[j]) return "crypto_secretbox writes after output"; for (j = 0;j < crypto_secretbox_BOXZEROBYTES;++j) if (c[j] != 0) return 
"crypto_secretbox does not clear extra bytes"; for (j = -16;j < 0;++j) c[j] = random(); for (j = clen;j < clen + 16;++j) c[j] = random(); for (j = -16;j < clen + 16;++j) c2[j] = c[j]; for (j = -16;j < tlen + 16;++j) t2[j] = t[j] = random(); if (crypto_secretbox_open(t,c,clen,n,k) != 0) return "crypto_secretbox_open returns nonzero"; for (j = -16;j < clen + 16;++j) if (c2[j] != c[j]) return "crypto_secretbox_open overwrites c"; for (j = -16;j < nlen + 16;++j) if (n2[j] != n[j]) return "crypto_secretbox_open overwrites n"; for (j = -16;j < klen + 16;++j) if (k2[j] != k[j]) return "crypto_secretbox_open overwrites k"; for (j = -16;j < 0;++j) if (t2[j] != t[j]) return "crypto_secretbox_open writes before output"; for (j = tlen;j < tlen + 16;++j) if (t2[j] != t[j]) return "crypto_secretbox_open writes after output"; for (j = 0;j < crypto_secretbox_ZEROBYTES;++j) if (t[j] != 0) return "crypto_secretbox_open does not clear extra bytes"; for (j = 0;j < i;++j) if (t[j] != m[j]) return "plaintext does not match"; for (j = 0;j < i;++j) k[j % klen] ^= c[j + crypto_secretbox_BOXZEROBYTES]; crypto_secretbox(c,m,mlen,n,k); for (j = 0;j < i;++j) n[j % nlen] ^= c[j + crypto_secretbox_BOXZEROBYTES]; crypto_secretbox(c,m,mlen,n,k); if (i == 0) m[crypto_secretbox_ZEROBYTES + 0] = 0; m[crypto_secretbox_ZEROBYTES + i] = m[crypto_secretbox_ZEROBYTES + 0]; for (j = 0;j < i;++j) m[j + crypto_secretbox_ZEROBYTES] ^= c[j + crypto_secretbox_BOXZEROBYTES]; } for (i = 0;i < klen;++i) { checksum[2 * i] = "0123456789abcdef"[15 & (k[i] >> 4)]; checksum[2 * i + 1] = "0123456789abcdef"[15 & k[i]]; } checksum[2 * i] = 0; return 0; } curvedns-curvedns-0.87/nacl/crypto_secretbox/wrapper-box.cpp000066400000000000000000000014331150631715100244160ustar00rootroot00000000000000#include using std::string; #include "crypto_secretbox.h" string crypto_secretbox(const string &m,const string &n,const string &k) { if (k.size() != crypto_secretbox_KEYBYTES) throw "incorrect key length"; if (n.size() != crypto_secretbox_NONCEBYTES) throw "incorrect nonce length"; size_t mlen = m.size() + crypto_secretbox_ZEROBYTES; unsigned char mpad[mlen]; for (int i = 0;i < crypto_secretbox_ZEROBYTES;++i) mpad[i] = 0; for (int i = crypto_secretbox_ZEROBYTES;i < mlen;++i) mpad[i] = m[i - crypto_secretbox_ZEROBYTES]; unsigned char cpad[mlen]; crypto_secretbox(cpad,mpad,mlen,(const unsigned char *) n.c_str(),(const unsigned char *) k.c_str()); return string( (char *) cpad + crypto_secretbox_BOXZEROBYTES, mlen - crypto_secretbox_BOXZEROBYTES ); } curvedns-curvedns-0.87/nacl/crypto_secretbox/wrapper-open.cpp000066400000000000000000000017161150631715100245730ustar00rootroot00000000000000#include using std::string; #include "crypto_secretbox.h" string crypto_secretbox_open(const string &c,const string &n,const string &k) { if (k.size() != crypto_secretbox_KEYBYTES) throw "incorrect key length"; if (n.size() != crypto_secretbox_NONCEBYTES) throw "incorrect nonce length"; size_t clen = c.size() + crypto_secretbox_BOXZEROBYTES; unsigned char cpad[clen]; for (int i = 0;i < crypto_secretbox_BOXZEROBYTES;++i) cpad[i] = 0; for (int i = crypto_secretbox_BOXZEROBYTES;i < clen;++i) cpad[i] = c[i - crypto_secretbox_BOXZEROBYTES]; unsigned char mpad[clen]; if (crypto_secretbox_open(mpad,cpad,clen,(const unsigned char *) n.c_str(),(const unsigned char *) k.c_str()) != 0) throw "ciphertext fails verification"; if (clen < crypto_secretbox_ZEROBYTES) throw "ciphertext too short"; // should have been caught by _open return string( (char *) mpad + crypto_secretbox_ZEROBYTES, clen 
- crypto_secretbox_ZEROBYTES ); } curvedns-curvedns-0.87/nacl/crypto_secretbox/xsalsa20poly1305/000077500000000000000000000000001150631715100243155ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_secretbox/xsalsa20poly1305/checksum000066400000000000000000000001011150631715100260320ustar00rootroot00000000000000df372f95dd87381b7c9ceb6f340ccaa03d19bed5d9e4ab004d99d847675a9658 curvedns-curvedns-0.87/nacl/crypto_secretbox/xsalsa20poly1305/ref/000077500000000000000000000000001150631715100250715ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_secretbox/xsalsa20poly1305/ref/api.h000066400000000000000000000001631150631715100260130ustar00rootroot00000000000000#define CRYPTO_KEYBYTES 32 #define CRYPTO_NONCEBYTES 24 #define CRYPTO_ZEROBYTES 32 #define CRYPTO_BOXZEROBYTES 16 curvedns-curvedns-0.87/nacl/crypto_secretbox/xsalsa20poly1305/ref/box.c000066400000000000000000000015651150631715100260340ustar00rootroot00000000000000#include "crypto_onetimeauth_poly1305.h" #include "crypto_stream_xsalsa20.h" #include "crypto_secretbox.h" int crypto_secretbox( unsigned char *c, const unsigned char *m,unsigned long long mlen, const unsigned char *n, const unsigned char *k ) { int i; if (mlen < 32) return -1; crypto_stream_xsalsa20_xor(c,m,mlen,n,k); crypto_onetimeauth_poly1305(c + 16,c + 32,mlen - 32,c); for (i = 0;i < 16;++i) c[i] = 0; return 0; } int crypto_secretbox_open( unsigned char *m, const unsigned char *c,unsigned long long clen, const unsigned char *n, const unsigned char *k ) { int i; unsigned char subkey[32]; if (clen < 32) return -1; crypto_stream_xsalsa20(subkey,32,n,k); if (crypto_onetimeauth_poly1305_verify(c + 16,c + 32,clen - 32,subkey) != 0) return -1; crypto_stream_xsalsa20_xor(m,c,clen,n,k); for (i = 0;i < 32;++i) m[i] = 0; return 0; } curvedns-curvedns-0.87/nacl/crypto_secretbox/xsalsa20poly1305/selected000066400000000000000000000000001150631715100260160ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_secretbox/xsalsa20poly1305/used000066400000000000000000000000001150631715100251660ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/000077500000000000000000000000001150631715100207405ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/000077500000000000000000000000001150631715100224545ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/checksum000066400000000000000000000000411150631715100241740ustar00rootroot000000000000006e9966897837aae181e93261ae88fdf0 curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/core2/000077500000000000000000000000001150631715100234665ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/core2/afternm.s000066400000000000000000012407401150631715100253160ustar00rootroot00000000000000# Author: Emilia Käsper and Peter Schwabe # Date: 2009-03-19 # +2010.01.31: minor namespace modifications # Public domain .data .p2align 6 RCON: .int 0x00000000, 0x00000000, 0x00000000, 0xffffffff ROTB: .int 0x0c000000, 0x00000000, 0x04000000, 0x08000000 EXPB0: .int 0x03030303, 0x07070707, 0x0b0b0b0b, 0x0f0f0f0f CTRINC1: .int 0x00000001, 0x00000000, 0x00000000, 0x00000000 CTRINC2: .int 0x00000002, 0x00000000, 0x00000000, 0x00000000 CTRINC3: .int 0x00000003, 0x00000000, 0x00000000, 0x00000000 CTRINC4: .int 0x00000004, 0x00000000, 0x00000000, 0x00000000 CTRINC5: .int 0x00000005, 0x00000000, 0x00000000, 0x00000000 CTRINC6: .int 0x00000006, 0x00000000, 0x00000000, 0x00000000 CTRINC7: .int 0x00000007, 0x00000000, 0x00000000, 0x00000000 RCTRINC1: 
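# (Note on the constant tables above and below: CTRINC1..CTRINC7 and
# RCTRINC1..RCTRINC7 evidently hold the counter increments, in both byte
# orders, used to advance the 128-bit CTR counter for the several 16-byte
# blocks this implementation processes in parallel, and BS0/BS1/BS2
# (the 0x55.., 0x33.., 0x0f.. patterns) are the usual bit-interleaving masks
# for the bitsliced AES representation.  The entry point defined in this
# file, crypto_stream_aes128ctr_core2_afternm(outp, len, np, c), appears to
# write len bytes of AES-128-CTR keystream to outp for the nonce/counter
# block at np, using the precomputed "afternm" key material at c produced by
# a separate beforenm step.)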
.int 0x00000000, 0x00000000, 0x00000000, 0x00000001 RCTRINC2: .int 0x00000000, 0x00000000, 0x00000000, 0x00000002 RCTRINC3: .int 0x00000000, 0x00000000, 0x00000000, 0x00000003 RCTRINC4: .int 0x00000000, 0x00000000, 0x00000000, 0x00000004 RCTRINC5: .int 0x00000000, 0x00000000, 0x00000000, 0x00000005 RCTRINC6: .int 0x00000000, 0x00000000, 0x00000000, 0x00000006 RCTRINC7: .int 0x00000000, 0x00000000, 0x00000000, 0x00000007 SWAP32: .int 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f M0SWAP: .quad 0x0105090d0004080c , 0x03070b0f02060a0e BS0: .quad 0x5555555555555555, 0x5555555555555555 BS1: .quad 0x3333333333333333, 0x3333333333333333 BS2: .quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f ONE: .quad 0xffffffffffffffff, 0xffffffffffffffff M0: .quad 0x02060a0e03070b0f, 0x0004080c0105090d SRM0: .quad 0x0304090e00050a0f, 0x01060b0c0207080d SR: .quad 0x0504070600030201, 0x0f0e0d0c0a09080b # qhasm: int64 outp # qhasm: int64 len # qhasm: int64 np # qhasm: int64 c # qhasm: input outp # qhasm: input len # qhasm: input np # qhasm: input c # qhasm: int64 lensav # qhasm: int6464 xmm0 # qhasm: int6464 xmm1 # qhasm: int6464 xmm2 # qhasm: int6464 xmm3 # qhasm: int6464 xmm4 # qhasm: int6464 xmm5 # qhasm: int6464 xmm6 # qhasm: int6464 xmm7 # qhasm: int6464 xmm8 # qhasm: int6464 xmm9 # qhasm: int6464 xmm10 # qhasm: int6464 xmm11 # qhasm: int6464 xmm12 # qhasm: int6464 xmm13 # qhasm: int6464 xmm14 # qhasm: int6464 xmm15 # qhasm: int6464 t # qhasm: stack1024 bl # qhasm: stack128 nonce_stack # qhasm: int64 blp # qhasm: int64 b # qhasm: int64 tmp # qhasm: enter crypto_stream_aes128ctr_core2_afternm .text .p2align 5 .globl _crypto_stream_aes128ctr_core2_afternm .globl crypto_stream_aes128ctr_core2_afternm _crypto_stream_aes128ctr_core2_afternm: crypto_stream_aes128ctr_core2_afternm: mov %rsp,%r11 and $31,%r11 add $160,%r11 sub %r11,%rsp # qhasm: xmm0 = *(int128 *) (np + 0) # asm 1: movdqa 0(xmm0=int6464#1 # asm 2: movdqa 0(xmm0=%xmm0 movdqa 0(%rdx),%xmm0 # qhasm: nonce_stack = xmm0 # asm 1: movdqa nonce_stack=stack128#1 # asm 2: movdqa nonce_stack=0(%rsp) movdqa %xmm0,0(%rsp) # qhasm: np = &nonce_stack # asm 1: leaq np=int64#3 # asm 2: leaq np=%rdx leaq 0(%rsp),%rdx # qhasm: enc_block: ._enc_block: # qhasm: xmm0 = *(int128 *) (np + 0) # asm 1: movdqa 0(xmm0=int6464#1 # asm 2: movdqa 0(xmm0=%xmm0 movdqa 0(%rdx),%xmm0 # qhasm: xmm1 = xmm0 # asm 1: movdqa xmm1=int6464#2 # asm 2: movdqa xmm1=%xmm1 movdqa %xmm0,%xmm1 # qhasm: shuffle bytes of xmm1 by SWAP32 # asm 1: pshufb SWAP32,xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm1,%xmm2 # qhasm: xmm3 = xmm1 # asm 1: movdqa xmm3=int6464#4 # asm 2: movdqa xmm3=%xmm3 movdqa %xmm1,%xmm3 # qhasm: xmm4 = xmm1 # asm 1: movdqa xmm4=int6464#5 # asm 2: movdqa xmm4=%xmm4 movdqa %xmm1,%xmm4 # qhasm: xmm5 = xmm1 # asm 1: movdqa xmm5=int6464#6 # asm 2: movdqa xmm5=%xmm5 movdqa %xmm1,%xmm5 # qhasm: xmm6 = xmm1 # asm 1: movdqa xmm6=int6464#7 # asm 2: movdqa xmm6=%xmm6 movdqa %xmm1,%xmm6 # qhasm: xmm7 = xmm1 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm1,%xmm7 # qhasm: int32323232 xmm1 += RCTRINC1 # asm 1: paddd RCTRINC1,xmm8=int6464#9 # asm 2: movdqa xmm8=%xmm8 movdqa %xmm6,%xmm8 # qhasm: uint6464 xmm8 >>= 1 # asm 1: psrlq $1,xmm8=int6464#9 # asm 2: movdqa xmm8=%xmm8 movdqa %xmm4,%xmm8 # qhasm: uint6464 xmm8 >>= 1 # asm 1: psrlq $1,xmm8=int6464#9 # asm 2: movdqa xmm8=%xmm8 movdqa %xmm2,%xmm8 # qhasm: uint6464 xmm8 >>= 1 # asm 1: psrlq $1,xmm8=int6464#9 # asm 2: movdqa xmm8=%xmm8 movdqa %xmm0,%xmm8 # qhasm: uint6464 xmm8 >>= 1 # asm 1: psrlq $1,xmm8=int6464#9 # asm 
2: movdqa xmm8=%xmm8 movdqa %xmm5,%xmm8 # qhasm: uint6464 xmm8 >>= 2 # asm 1: psrlq $2,xmm8=int6464#9 # asm 2: movdqa xmm8=%xmm8 movdqa %xmm4,%xmm8 # qhasm: uint6464 xmm8 >>= 2 # asm 1: psrlq $2,xmm8=int6464#9 # asm 2: movdqa xmm8=%xmm8 movdqa %xmm1,%xmm8 # qhasm: uint6464 xmm8 >>= 2 # asm 1: psrlq $2,xmm8=int6464#9 # asm 2: movdqa xmm8=%xmm8 movdqa %xmm0,%xmm8 # qhasm: uint6464 xmm8 >>= 2 # asm 1: psrlq $2,xmm8=int6464#9 # asm 2: movdqa xmm8=%xmm8 movdqa %xmm3,%xmm8 # qhasm: uint6464 xmm8 >>= 4 # asm 1: psrlq $4,xmm8=int6464#9 # asm 2: movdqa xmm8=%xmm8 movdqa %xmm2,%xmm8 # qhasm: uint6464 xmm8 >>= 4 # asm 1: psrlq $4,xmm8=int6464#9 # asm 2: movdqa xmm8=%xmm8 movdqa %xmm1,%xmm8 # qhasm: uint6464 xmm8 >>= 4 # asm 1: psrlq $4,xmm8=int6464#9 # asm 2: movdqa xmm8=%xmm8 movdqa %xmm0,%xmm8 # qhasm: uint6464 xmm8 >>= 4 # asm 1: psrlq $4,xmm11=int6464#9 # asm 2: movdqa xmm11=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm10 = xmm1 # asm 1: movdqa xmm10=int6464#10 # asm 2: movdqa xmm10=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm9 = xmm5 # asm 1: movdqa xmm9=int6464#11 # asm 2: movdqa xmm9=%xmm10 movdqa %xmm5,%xmm10 # qhasm: xmm13 = xmm2 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm2,%xmm11 # qhasm: xmm12 = xmm6 # asm 1: movdqa xmm12=int6464#13 # asm 2: movdqa xmm12=%xmm12 movdqa %xmm6,%xmm12 # qhasm: xmm11 ^= xmm4 # asm 1: pxor xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm8,%xmm13 # qhasm: xmm8 = xmm10 # asm 1: movdqa xmm8=int6464#15 # asm 2: movdqa xmm8=%xmm14 movdqa %xmm9,%xmm14 # qhasm: xmm15 = xmm11 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm8,%xmm15 # qhasm: xmm10 |= xmm9 # asm 1: por xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm3,%xmm10 # qhasm: xmm12 ^= xmm0 # asm 1: pxor xmm13=int6464#11 # asm 2: movdqa xmm13=%xmm10 movdqa %xmm7,%xmm10 # qhasm: xmm13 ^= xmm1 # asm 1: pxor xmm12=int6464#12 # asm 2: movdqa xmm12=%xmm11 movdqa %xmm5,%xmm11 # qhasm: xmm9 = xmm13 # asm 1: movdqa xmm9=int6464#13 # asm 2: movdqa xmm9=%xmm12 movdqa %xmm10,%xmm12 # qhasm: xmm12 ^= xmm6 # asm 1: pxor xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm2,%xmm10 # qhasm: xmm13 = xmm4 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm4,%xmm11 # qhasm: xmm14 = xmm1 # asm 1: movdqa xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm1,%xmm13 # qhasm: xmm15 = xmm7 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm7,%xmm15 # qhasm: xmm12 &= xmm3 # asm 1: pand xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm8,%xmm10 # qhasm: xmm12 ^= xmm10 # asm 1: pxor xmm14=int6464#12 # asm 2: movdqa xmm14=%xmm11 movdqa %xmm14,%xmm11 # qhasm: xmm14 ^= xmm11 # asm 1: pxor xmm15=int6464#14 # asm 2: movdqa xmm15=%xmm13 movdqa %xmm10,%xmm13 # qhasm: xmm15 &= xmm14 # asm 1: pand xmm13=int6464#16 # asm 2: movdqa xmm13=%xmm15 movdqa %xmm12,%xmm15 # qhasm: xmm13 ^= xmm8 # asm 1: pxor xmm10=int6464#9 # asm 2: movdqa xmm10=%xmm8 movdqa %xmm11,%xmm8 # qhasm: xmm10 ^= xmm13 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm6,%xmm8 # qhasm: xmm8 = xmm5 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm5,%xmm9 # qhasm: xmm10 = xmm15 # asm 1: movdqa xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm10 ^= xmm14 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm12=int6464#9 # asm 2: 
movdqa xmm12=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm8 = xmm1 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm12 ^= xmm4 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x93,xmm8=%xmm8 pshufd $0x93,%xmm0,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm1 by 0x93 # asm 1: pshufd $0x93,xmm9=int6464#10 # asm 2: pshufd $0x93,xmm9=%xmm9 pshufd $0x93,%xmm1,%xmm9 # qhasm: xmm10 = shuffle dwords of xmm4 by 0x93 # asm 1: pshufd $0x93,xmm10=int6464#11 # asm 2: pshufd $0x93,xmm10=%xmm10 pshufd $0x93,%xmm4,%xmm10 # qhasm: xmm11 = shuffle dwords of xmm6 by 0x93 # asm 1: pshufd $0x93,xmm11=int6464#12 # asm 2: pshufd $0x93,xmm11=%xmm11 pshufd $0x93,%xmm6,%xmm11 # qhasm: xmm12 = shuffle dwords of xmm3 by 0x93 # asm 1: pshufd $0x93,xmm12=int6464#13 # asm 2: pshufd $0x93,xmm12=%xmm12 pshufd $0x93,%xmm3,%xmm12 # qhasm: xmm13 = shuffle dwords of xmm7 by 0x93 # asm 1: pshufd $0x93,xmm13=int6464#14 # asm 2: pshufd $0x93,xmm13=%xmm13 pshufd $0x93,%xmm7,%xmm13 # qhasm: xmm14 = shuffle dwords of xmm2 by 0x93 # asm 1: pshufd $0x93,xmm14=int6464#15 # asm 2: pshufd $0x93,xmm14=%xmm14 pshufd $0x93,%xmm2,%xmm14 # qhasm: xmm15 = shuffle dwords of xmm5 by 0x93 # asm 1: pshufd $0x93,xmm15=int6464#16 # asm 2: pshufd $0x93,xmm15=%xmm15 pshufd $0x93,%xmm5,%xmm15 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x4E,xmm0=%xmm0 pshufd $0x4E,%xmm0,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E # asm 1: pshufd $0x4E,xmm1=int6464#2 # asm 2: pshufd $0x4E,xmm1=%xmm1 pshufd $0x4E,%xmm1,%xmm1 # qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E # asm 1: pshufd $0x4E,xmm4=int6464#5 # asm 2: pshufd $0x4E,xmm4=%xmm4 pshufd $0x4E,%xmm4,%xmm4 # qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E # asm 1: pshufd $0x4E,xmm6=int6464#7 # asm 2: pshufd $0x4E,xmm6=%xmm6 pshufd $0x4E,%xmm6,%xmm6 # qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E # asm 1: pshufd $0x4E,xmm3=int6464#4 # asm 2: pshufd $0x4E,xmm3=%xmm3 pshufd $0x4E,%xmm3,%xmm3 # qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E # asm 1: pshufd $0x4E,xmm7=int6464#8 # asm 2: pshufd $0x4E,xmm7=%xmm7 pshufd $0x4E,%xmm7,%xmm7 # qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E # asm 1: pshufd $0x4E,xmm2=int6464#3 # asm 2: pshufd $0x4E,xmm2=%xmm2 pshufd $0x4E,%xmm2,%xmm2 # qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E # asm 1: pshufd $0x4E,xmm5=int6464#6 # asm 2: pshufd $0x4E,xmm5=%xmm5 pshufd $0x4E,%xmm5,%xmm5 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm3=int6464#1 # asm 2: movdqa xmm3=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm2 = xmm9 # asm 1: movdqa xmm2=int6464#2 # asm 2: movdqa xmm2=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm1 = xmm13 # asm 1: movdqa xmm1=int6464#3 # asm 2: movdqa xmm1=%xmm2 movdqa %xmm13,%xmm2 # qhasm: xmm5 = xmm10 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm10,%xmm3 # qhasm: xmm4 = xmm14 # asm 1: movdqa xmm4=int6464#5 # asm 2: movdqa xmm4=%xmm4 movdqa %xmm14,%xmm4 # qhasm: xmm3 ^= xmm12 # asm 1: pxor xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm0,%xmm5 # qhasm: xmm0 = xmm2 # asm 1: movdqa xmm0=int6464#7 # asm 2: movdqa xmm0=%xmm6 movdqa %xmm1,%xmm6 # qhasm: xmm7 = xmm3 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm0,%xmm7 # qhasm: xmm2 |= xmm1 # asm 1: por xmm4=int6464#3 # asm 2: movdqa 
xmm4=%xmm2 movdqa %xmm11,%xmm2 # qhasm: xmm4 ^= xmm8 # asm 1: pxor xmm5=int6464#3 # asm 2: movdqa xmm5=%xmm2 movdqa %xmm15,%xmm2 # qhasm: xmm5 ^= xmm9 # asm 1: pxor xmm4=int6464#4 # asm 2: movdqa xmm4=%xmm3 movdqa %xmm13,%xmm3 # qhasm: xmm1 = xmm5 # asm 1: movdqa xmm1=int6464#5 # asm 2: movdqa xmm1=%xmm4 movdqa %xmm2,%xmm4 # qhasm: xmm4 ^= xmm14 # asm 1: pxor xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm10,%xmm2 # qhasm: xmm5 = xmm12 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm12,%xmm3 # qhasm: xmm6 = xmm9 # asm 1: movdqa xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm9,%xmm5 # qhasm: xmm7 = xmm15 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm15,%xmm7 # qhasm: xmm4 &= xmm11 # asm 1: pand xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm0,%xmm2 # qhasm: xmm4 ^= xmm2 # asm 1: pxor xmm6=int6464#4 # asm 2: movdqa xmm6=%xmm3 movdqa %xmm6,%xmm3 # qhasm: xmm6 ^= xmm3 # asm 1: pxor xmm7=int6464#6 # asm 2: movdqa xmm7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: xmm7 &= xmm6 # asm 1: pand xmm5=int6464#8 # asm 2: movdqa xmm5=%xmm7 movdqa %xmm4,%xmm7 # qhasm: xmm5 ^= xmm0 # asm 1: pxor xmm2=int6464#1 # asm 2: movdqa xmm2=%xmm0 movdqa %xmm3,%xmm0 # qhasm: xmm2 ^= xmm5 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm14,%xmm0 # qhasm: xmm0 = xmm13 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm13,%xmm1 # qhasm: xmm2 = xmm7 # asm 1: movdqa xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm2 ^= xmm6 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm0 = xmm9 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm4 ^= xmm12 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x93,xmm0=%xmm0 pshufd $0x93,%xmm8,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm9 by 0x93 # asm 1: pshufd $0x93,xmm1=int6464#2 # asm 2: pshufd $0x93,xmm1=%xmm1 pshufd $0x93,%xmm9,%xmm1 # qhasm: xmm2 = shuffle dwords of xmm12 by 0x93 # asm 1: pshufd $0x93,xmm2=int6464#3 # asm 2: pshufd $0x93,xmm2=%xmm2 pshufd $0x93,%xmm12,%xmm2 # qhasm: xmm3 = shuffle dwords of xmm14 by 0x93 # asm 1: pshufd $0x93,xmm3=int6464#4 # asm 2: pshufd $0x93,xmm3=%xmm3 pshufd $0x93,%xmm14,%xmm3 # qhasm: xmm4 = shuffle dwords of xmm11 by 0x93 # asm 1: pshufd $0x93,xmm4=int6464#5 # asm 2: pshufd $0x93,xmm4=%xmm4 pshufd $0x93,%xmm11,%xmm4 # qhasm: xmm5 = shuffle dwords of xmm15 by 0x93 # asm 1: pshufd $0x93,xmm5=int6464#6 # asm 2: pshufd $0x93,xmm5=%xmm5 pshufd $0x93,%xmm15,%xmm5 # qhasm: xmm6 = shuffle dwords of xmm10 by 0x93 # asm 1: pshufd $0x93,xmm6=int6464#7 # asm 2: pshufd $0x93,xmm6=%xmm6 pshufd $0x93,%xmm10,%xmm6 # qhasm: xmm7 = shuffle dwords of xmm13 by 0x93 # asm 1: pshufd $0x93,xmm7=int6464#8 # asm 2: pshufd $0x93,xmm7=%xmm7 pshufd $0x93,%xmm13,%xmm7 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x4E,xmm8=%xmm8 pshufd $0x4E,%xmm8,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E # asm 1: pshufd $0x4E,xmm9=int6464#10 # asm 2: pshufd $0x4E,xmm9=%xmm9 pshufd $0x4E,%xmm9,%xmm9 # 
qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E # asm 1: pshufd $0x4E,xmm12=int6464#13 # asm 2: pshufd $0x4E,xmm12=%xmm12 pshufd $0x4E,%xmm12,%xmm12 # qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E # asm 1: pshufd $0x4E,xmm14=int6464#15 # asm 2: pshufd $0x4E,xmm14=%xmm14 pshufd $0x4E,%xmm14,%xmm14 # qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E # asm 1: pshufd $0x4E,xmm11=int6464#12 # asm 2: pshufd $0x4E,xmm11=%xmm11 pshufd $0x4E,%xmm11,%xmm11 # qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E # asm 1: pshufd $0x4E,xmm15=int6464#16 # asm 2: pshufd $0x4E,xmm15=%xmm15 pshufd $0x4E,%xmm15,%xmm15 # qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E # asm 1: pshufd $0x4E,xmm10=int6464#11 # asm 2: pshufd $0x4E,xmm10=%xmm10 pshufd $0x4E,%xmm10,%xmm10 # qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E # asm 1: pshufd $0x4E,xmm13=int6464#14 # asm 2: pshufd $0x4E,xmm13=%xmm13 pshufd $0x4E,%xmm13,%xmm13 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm11=int6464#9 # asm 2: movdqa xmm11=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm10 = xmm1 # asm 1: movdqa xmm10=int6464#10 # asm 2: movdqa xmm10=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm9 = xmm5 # asm 1: movdqa xmm9=int6464#11 # asm 2: movdqa xmm9=%xmm10 movdqa %xmm5,%xmm10 # qhasm: xmm13 = xmm2 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm2,%xmm11 # qhasm: xmm12 = xmm6 # asm 1: movdqa xmm12=int6464#13 # asm 2: movdqa xmm12=%xmm12 movdqa %xmm6,%xmm12 # qhasm: xmm11 ^= xmm4 # asm 1: pxor xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm8,%xmm13 # qhasm: xmm8 = xmm10 # asm 1: movdqa xmm8=int6464#15 # asm 2: movdqa xmm8=%xmm14 movdqa %xmm9,%xmm14 # qhasm: xmm15 = xmm11 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm8,%xmm15 # qhasm: xmm10 |= xmm9 # asm 1: por xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm3,%xmm10 # qhasm: xmm12 ^= xmm0 # asm 1: pxor xmm13=int6464#11 # asm 2: movdqa xmm13=%xmm10 movdqa %xmm7,%xmm10 # qhasm: xmm13 ^= xmm1 # asm 1: pxor xmm12=int6464#12 # asm 2: movdqa xmm12=%xmm11 movdqa %xmm5,%xmm11 # qhasm: xmm9 = xmm13 # asm 1: movdqa xmm9=int6464#13 # asm 2: movdqa xmm9=%xmm12 movdqa %xmm10,%xmm12 # qhasm: xmm12 ^= xmm6 # asm 1: pxor xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm2,%xmm10 # qhasm: xmm13 = xmm4 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm4,%xmm11 # qhasm: xmm14 = xmm1 # asm 1: movdqa xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm1,%xmm13 # qhasm: xmm15 = xmm7 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm7,%xmm15 # qhasm: xmm12 &= xmm3 # asm 1: pand xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm8,%xmm10 # qhasm: xmm12 ^= xmm10 # asm 1: pxor xmm14=int6464#12 # asm 2: movdqa xmm14=%xmm11 movdqa %xmm14,%xmm11 # qhasm: xmm14 ^= xmm11 # asm 1: pxor xmm15=int6464#14 # asm 2: movdqa xmm15=%xmm13 movdqa %xmm10,%xmm13 # qhasm: xmm15 &= xmm14 # asm 1: pand xmm13=int6464#16 # asm 2: movdqa xmm13=%xmm15 movdqa %xmm12,%xmm15 # qhasm: xmm13 ^= xmm8 # asm 1: pxor xmm10=int6464#9 # asm 2: movdqa xmm10=%xmm8 movdqa %xmm11,%xmm8 # qhasm: xmm10 ^= xmm13 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm6,%xmm8 # qhasm: xmm8 = xmm5 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm5,%xmm9 # qhasm: xmm10 = xmm15 # asm 1: movdqa xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm10 ^= xmm14 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa 
xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm8 = xmm1 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm12 ^= xmm4 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x93,xmm8=%xmm8 pshufd $0x93,%xmm0,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm1 by 0x93 # asm 1: pshufd $0x93,xmm9=int6464#10 # asm 2: pshufd $0x93,xmm9=%xmm9 pshufd $0x93,%xmm1,%xmm9 # qhasm: xmm10 = shuffle dwords of xmm4 by 0x93 # asm 1: pshufd $0x93,xmm10=int6464#11 # asm 2: pshufd $0x93,xmm10=%xmm10 pshufd $0x93,%xmm4,%xmm10 # qhasm: xmm11 = shuffle dwords of xmm6 by 0x93 # asm 1: pshufd $0x93,xmm11=int6464#12 # asm 2: pshufd $0x93,xmm11=%xmm11 pshufd $0x93,%xmm6,%xmm11 # qhasm: xmm12 = shuffle dwords of xmm3 by 0x93 # asm 1: pshufd $0x93,xmm12=int6464#13 # asm 2: pshufd $0x93,xmm12=%xmm12 pshufd $0x93,%xmm3,%xmm12 # qhasm: xmm13 = shuffle dwords of xmm7 by 0x93 # asm 1: pshufd $0x93,xmm13=int6464#14 # asm 2: pshufd $0x93,xmm13=%xmm13 pshufd $0x93,%xmm7,%xmm13 # qhasm: xmm14 = shuffle dwords of xmm2 by 0x93 # asm 1: pshufd $0x93,xmm14=int6464#15 # asm 2: pshufd $0x93,xmm14=%xmm14 pshufd $0x93,%xmm2,%xmm14 # qhasm: xmm15 = shuffle dwords of xmm5 by 0x93 # asm 1: pshufd $0x93,xmm15=int6464#16 # asm 2: pshufd $0x93,xmm15=%xmm15 pshufd $0x93,%xmm5,%xmm15 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x4E,xmm0=%xmm0 pshufd $0x4E,%xmm0,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E # asm 1: pshufd $0x4E,xmm1=int6464#2 # asm 2: pshufd $0x4E,xmm1=%xmm1 pshufd $0x4E,%xmm1,%xmm1 # qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E # asm 1: pshufd $0x4E,xmm4=int6464#5 # asm 2: pshufd $0x4E,xmm4=%xmm4 pshufd $0x4E,%xmm4,%xmm4 # qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E # asm 1: pshufd $0x4E,xmm6=int6464#7 # asm 2: pshufd $0x4E,xmm6=%xmm6 pshufd $0x4E,%xmm6,%xmm6 # qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E # asm 1: pshufd $0x4E,xmm3=int6464#4 # asm 2: pshufd $0x4E,xmm3=%xmm3 pshufd $0x4E,%xmm3,%xmm3 # qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E # asm 1: pshufd $0x4E,xmm7=int6464#8 # asm 2: pshufd $0x4E,xmm7=%xmm7 pshufd $0x4E,%xmm7,%xmm7 # qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E # asm 1: pshufd $0x4E,xmm2=int6464#3 # asm 2: pshufd $0x4E,xmm2=%xmm2 pshufd $0x4E,%xmm2,%xmm2 # qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E # asm 1: pshufd $0x4E,xmm5=int6464#6 # asm 2: pshufd $0x4E,xmm5=%xmm5 pshufd $0x4E,%xmm5,%xmm5 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm3=int6464#1 # asm 2: movdqa xmm3=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm2 = xmm9 # asm 1: movdqa xmm2=int6464#2 # asm 2: movdqa xmm2=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm1 = xmm13 # asm 1: movdqa xmm1=int6464#3 # asm 2: movdqa xmm1=%xmm2 movdqa %xmm13,%xmm2 # qhasm: xmm5 = xmm10 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm10,%xmm3 # qhasm: xmm4 = xmm14 # asm 1: movdqa xmm4=int6464#5 # asm 2: movdqa xmm4=%xmm4 movdqa %xmm14,%xmm4 # qhasm: xmm3 ^= xmm12 # asm 1: pxor xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm0,%xmm5 # qhasm: xmm0 = xmm2 # asm 1: movdqa xmm0=int6464#7 # asm 2: movdqa xmm0=%xmm6 movdqa %xmm1,%xmm6 # qhasm: xmm7 = xmm3 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa 
xmm7=%xmm7 movdqa %xmm0,%xmm7 # qhasm: xmm2 |= xmm1 # asm 1: por xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm11,%xmm2 # qhasm: xmm4 ^= xmm8 # asm 1: pxor xmm5=int6464#3 # asm 2: movdqa xmm5=%xmm2 movdqa %xmm15,%xmm2 # qhasm: xmm5 ^= xmm9 # asm 1: pxor xmm4=int6464#4 # asm 2: movdqa xmm4=%xmm3 movdqa %xmm13,%xmm3 # qhasm: xmm1 = xmm5 # asm 1: movdqa xmm1=int6464#5 # asm 2: movdqa xmm1=%xmm4 movdqa %xmm2,%xmm4 # qhasm: xmm4 ^= xmm14 # asm 1: pxor xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm10,%xmm2 # qhasm: xmm5 = xmm12 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm12,%xmm3 # qhasm: xmm6 = xmm9 # asm 1: movdqa xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm9,%xmm5 # qhasm: xmm7 = xmm15 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm15,%xmm7 # qhasm: xmm4 &= xmm11 # asm 1: pand xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm0,%xmm2 # qhasm: xmm4 ^= xmm2 # asm 1: pxor xmm6=int6464#4 # asm 2: movdqa xmm6=%xmm3 movdqa %xmm6,%xmm3 # qhasm: xmm6 ^= xmm3 # asm 1: pxor xmm7=int6464#6 # asm 2: movdqa xmm7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: xmm7 &= xmm6 # asm 1: pand xmm5=int6464#8 # asm 2: movdqa xmm5=%xmm7 movdqa %xmm4,%xmm7 # qhasm: xmm5 ^= xmm0 # asm 1: pxor xmm2=int6464#1 # asm 2: movdqa xmm2=%xmm0 movdqa %xmm3,%xmm0 # qhasm: xmm2 ^= xmm5 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm14,%xmm0 # qhasm: xmm0 = xmm13 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm13,%xmm1 # qhasm: xmm2 = xmm7 # asm 1: movdqa xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm2 ^= xmm6 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm0 = xmm9 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm4 ^= xmm12 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x93,xmm0=%xmm0 pshufd $0x93,%xmm8,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm9 by 0x93 # asm 1: pshufd $0x93,xmm1=int6464#2 # asm 2: pshufd $0x93,xmm1=%xmm1 pshufd $0x93,%xmm9,%xmm1 # qhasm: xmm2 = shuffle dwords of xmm12 by 0x93 # asm 1: pshufd $0x93,xmm2=int6464#3 # asm 2: pshufd $0x93,xmm2=%xmm2 pshufd $0x93,%xmm12,%xmm2 # qhasm: xmm3 = shuffle dwords of xmm14 by 0x93 # asm 1: pshufd $0x93,xmm3=int6464#4 # asm 2: pshufd $0x93,xmm3=%xmm3 pshufd $0x93,%xmm14,%xmm3 # qhasm: xmm4 = shuffle dwords of xmm11 by 0x93 # asm 1: pshufd $0x93,xmm4=int6464#5 # asm 2: pshufd $0x93,xmm4=%xmm4 pshufd $0x93,%xmm11,%xmm4 # qhasm: xmm5 = shuffle dwords of xmm15 by 0x93 # asm 1: pshufd $0x93,xmm5=int6464#6 # asm 2: pshufd $0x93,xmm5=%xmm5 pshufd $0x93,%xmm15,%xmm5 # qhasm: xmm6 = shuffle dwords of xmm10 by 0x93 # asm 1: pshufd $0x93,xmm6=int6464#7 # asm 2: pshufd $0x93,xmm6=%xmm6 pshufd $0x93,%xmm10,%xmm6 # qhasm: xmm7 = shuffle dwords of xmm13 by 0x93 # asm 1: pshufd $0x93,xmm7=int6464#8 # asm 2: pshufd $0x93,xmm7=%xmm7 pshufd $0x93,%xmm13,%xmm7 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x4E,xmm8=%xmm8 pshufd $0x4E,%xmm8,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E # 
asm 1: pshufd $0x4E,xmm9=int6464#10 # asm 2: pshufd $0x4E,xmm9=%xmm9 pshufd $0x4E,%xmm9,%xmm9 # qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E # asm 1: pshufd $0x4E,xmm12=int6464#13 # asm 2: pshufd $0x4E,xmm12=%xmm12 pshufd $0x4E,%xmm12,%xmm12 # qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E # asm 1: pshufd $0x4E,xmm14=int6464#15 # asm 2: pshufd $0x4E,xmm14=%xmm14 pshufd $0x4E,%xmm14,%xmm14 # qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E # asm 1: pshufd $0x4E,xmm11=int6464#12 # asm 2: pshufd $0x4E,xmm11=%xmm11 pshufd $0x4E,%xmm11,%xmm11 # qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E # asm 1: pshufd $0x4E,xmm15=int6464#16 # asm 2: pshufd $0x4E,xmm15=%xmm15 pshufd $0x4E,%xmm15,%xmm15 # qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E # asm 1: pshufd $0x4E,xmm10=int6464#11 # asm 2: pshufd $0x4E,xmm10=%xmm10 pshufd $0x4E,%xmm10,%xmm10 # qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E # asm 1: pshufd $0x4E,xmm13=int6464#14 # asm 2: pshufd $0x4E,xmm13=%xmm13 pshufd $0x4E,%xmm13,%xmm13 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm11=int6464#9 # asm 2: movdqa xmm11=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm10 = xmm1 # asm 1: movdqa xmm10=int6464#10 # asm 2: movdqa xmm10=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm9 = xmm5 # asm 1: movdqa xmm9=int6464#11 # asm 2: movdqa xmm9=%xmm10 movdqa %xmm5,%xmm10 # qhasm: xmm13 = xmm2 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm2,%xmm11 # qhasm: xmm12 = xmm6 # asm 1: movdqa xmm12=int6464#13 # asm 2: movdqa xmm12=%xmm12 movdqa %xmm6,%xmm12 # qhasm: xmm11 ^= xmm4 # asm 1: pxor xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm8,%xmm13 # qhasm: xmm8 = xmm10 # asm 1: movdqa xmm8=int6464#15 # asm 2: movdqa xmm8=%xmm14 movdqa %xmm9,%xmm14 # qhasm: xmm15 = xmm11 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm8,%xmm15 # qhasm: xmm10 |= xmm9 # asm 1: por xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm3,%xmm10 # qhasm: xmm12 ^= xmm0 # asm 1: pxor xmm13=int6464#11 # asm 2: movdqa xmm13=%xmm10 movdqa %xmm7,%xmm10 # qhasm: xmm13 ^= xmm1 # asm 1: pxor xmm12=int6464#12 # asm 2: movdqa xmm12=%xmm11 movdqa %xmm5,%xmm11 # qhasm: xmm9 = xmm13 # asm 1: movdqa xmm9=int6464#13 # asm 2: movdqa xmm9=%xmm12 movdqa %xmm10,%xmm12 # qhasm: xmm12 ^= xmm6 # asm 1: pxor xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm2,%xmm10 # qhasm: xmm13 = xmm4 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm4,%xmm11 # qhasm: xmm14 = xmm1 # asm 1: movdqa xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm1,%xmm13 # qhasm: xmm15 = xmm7 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm7,%xmm15 # qhasm: xmm12 &= xmm3 # asm 1: pand xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm8,%xmm10 # qhasm: xmm12 ^= xmm10 # asm 1: pxor xmm14=int6464#12 # asm 2: movdqa xmm14=%xmm11 movdqa %xmm14,%xmm11 # qhasm: xmm14 ^= xmm11 # asm 1: pxor xmm15=int6464#14 # asm 2: movdqa xmm15=%xmm13 movdqa %xmm10,%xmm13 # qhasm: xmm15 &= xmm14 # asm 1: pand xmm13=int6464#16 # asm 2: movdqa xmm13=%xmm15 movdqa %xmm12,%xmm15 # qhasm: xmm13 ^= xmm8 # asm 1: pxor xmm10=int6464#9 # asm 2: movdqa xmm10=%xmm8 movdqa %xmm11,%xmm8 # qhasm: xmm10 ^= xmm13 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm6,%xmm8 # qhasm: xmm8 = xmm5 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm5,%xmm9 # qhasm: xmm10 = xmm15 # asm 1: movdqa xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm10 ^= xmm14 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 
movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm8 = xmm1 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm12 ^= xmm4 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x93,xmm8=%xmm8 pshufd $0x93,%xmm0,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm1 by 0x93 # asm 1: pshufd $0x93,xmm9=int6464#10 # asm 2: pshufd $0x93,xmm9=%xmm9 pshufd $0x93,%xmm1,%xmm9 # qhasm: xmm10 = shuffle dwords of xmm4 by 0x93 # asm 1: pshufd $0x93,xmm10=int6464#11 # asm 2: pshufd $0x93,xmm10=%xmm10 pshufd $0x93,%xmm4,%xmm10 # qhasm: xmm11 = shuffle dwords of xmm6 by 0x93 # asm 1: pshufd $0x93,xmm11=int6464#12 # asm 2: pshufd $0x93,xmm11=%xmm11 pshufd $0x93,%xmm6,%xmm11 # qhasm: xmm12 = shuffle dwords of xmm3 by 0x93 # asm 1: pshufd $0x93,xmm12=int6464#13 # asm 2: pshufd $0x93,xmm12=%xmm12 pshufd $0x93,%xmm3,%xmm12 # qhasm: xmm13 = shuffle dwords of xmm7 by 0x93 # asm 1: pshufd $0x93,xmm13=int6464#14 # asm 2: pshufd $0x93,xmm13=%xmm13 pshufd $0x93,%xmm7,%xmm13 # qhasm: xmm14 = shuffle dwords of xmm2 by 0x93 # asm 1: pshufd $0x93,xmm14=int6464#15 # asm 2: pshufd $0x93,xmm14=%xmm14 pshufd $0x93,%xmm2,%xmm14 # qhasm: xmm15 = shuffle dwords of xmm5 by 0x93 # asm 1: pshufd $0x93,xmm15=int6464#16 # asm 2: pshufd $0x93,xmm15=%xmm15 pshufd $0x93,%xmm5,%xmm15 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x4E,xmm0=%xmm0 pshufd $0x4E,%xmm0,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E # asm 1: pshufd $0x4E,xmm1=int6464#2 # asm 2: pshufd $0x4E,xmm1=%xmm1 pshufd $0x4E,%xmm1,%xmm1 # qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E # asm 1: pshufd $0x4E,xmm4=int6464#5 # asm 2: pshufd $0x4E,xmm4=%xmm4 pshufd $0x4E,%xmm4,%xmm4 # qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E # asm 1: pshufd $0x4E,xmm6=int6464#7 # asm 2: pshufd $0x4E,xmm6=%xmm6 pshufd $0x4E,%xmm6,%xmm6 # qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E # asm 1: pshufd $0x4E,xmm3=int6464#4 # asm 2: pshufd $0x4E,xmm3=%xmm3 pshufd $0x4E,%xmm3,%xmm3 # qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E # asm 1: pshufd $0x4E,xmm7=int6464#8 # asm 2: pshufd $0x4E,xmm7=%xmm7 pshufd $0x4E,%xmm7,%xmm7 # qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E # asm 1: pshufd $0x4E,xmm2=int6464#3 # asm 2: pshufd $0x4E,xmm2=%xmm2 pshufd $0x4E,%xmm2,%xmm2 # qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E # asm 1: pshufd $0x4E,xmm5=int6464#6 # asm 2: pshufd $0x4E,xmm5=%xmm5 pshufd $0x4E,%xmm5,%xmm5 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm3=int6464#1 # asm 2: movdqa xmm3=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm2 = xmm9 # asm 1: movdqa xmm2=int6464#2 # asm 2: movdqa xmm2=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm1 = xmm13 # asm 1: movdqa xmm1=int6464#3 # asm 2: movdqa xmm1=%xmm2 movdqa %xmm13,%xmm2 # qhasm: xmm5 = xmm10 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm10,%xmm3 # qhasm: xmm4 = xmm14 # asm 1: movdqa xmm4=int6464#5 # asm 2: movdqa xmm4=%xmm4 movdqa %xmm14,%xmm4 # qhasm: xmm3 ^= xmm12 # asm 1: pxor xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm0,%xmm5 # qhasm: xmm0 = xmm2 # asm 1: movdqa xmm0=int6464#7 # asm 2: movdqa 
xmm0=%xmm6 movdqa %xmm1,%xmm6 # qhasm: xmm7 = xmm3 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm0,%xmm7 # qhasm: xmm2 |= xmm1 # asm 1: por xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm11,%xmm2 # qhasm: xmm4 ^= xmm8 # asm 1: pxor xmm5=int6464#3 # asm 2: movdqa xmm5=%xmm2 movdqa %xmm15,%xmm2 # qhasm: xmm5 ^= xmm9 # asm 1: pxor xmm4=int6464#4 # asm 2: movdqa xmm4=%xmm3 movdqa %xmm13,%xmm3 # qhasm: xmm1 = xmm5 # asm 1: movdqa xmm1=int6464#5 # asm 2: movdqa xmm1=%xmm4 movdqa %xmm2,%xmm4 # qhasm: xmm4 ^= xmm14 # asm 1: pxor xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm10,%xmm2 # qhasm: xmm5 = xmm12 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm12,%xmm3 # qhasm: xmm6 = xmm9 # asm 1: movdqa xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm9,%xmm5 # qhasm: xmm7 = xmm15 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm15,%xmm7 # qhasm: xmm4 &= xmm11 # asm 1: pand xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm0,%xmm2 # qhasm: xmm4 ^= xmm2 # asm 1: pxor xmm6=int6464#4 # asm 2: movdqa xmm6=%xmm3 movdqa %xmm6,%xmm3 # qhasm: xmm6 ^= xmm3 # asm 1: pxor xmm7=int6464#6 # asm 2: movdqa xmm7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: xmm7 &= xmm6 # asm 1: pand xmm5=int6464#8 # asm 2: movdqa xmm5=%xmm7 movdqa %xmm4,%xmm7 # qhasm: xmm5 ^= xmm0 # asm 1: pxor xmm2=int6464#1 # asm 2: movdqa xmm2=%xmm0 movdqa %xmm3,%xmm0 # qhasm: xmm2 ^= xmm5 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm14,%xmm0 # qhasm: xmm0 = xmm13 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm13,%xmm1 # qhasm: xmm2 = xmm7 # asm 1: movdqa xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm2 ^= xmm6 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm0 = xmm9 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm4 ^= xmm12 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x93,xmm0=%xmm0 pshufd $0x93,%xmm8,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm9 by 0x93 # asm 1: pshufd $0x93,xmm1=int6464#2 # asm 2: pshufd $0x93,xmm1=%xmm1 pshufd $0x93,%xmm9,%xmm1 # qhasm: xmm2 = shuffle dwords of xmm12 by 0x93 # asm 1: pshufd $0x93,xmm2=int6464#3 # asm 2: pshufd $0x93,xmm2=%xmm2 pshufd $0x93,%xmm12,%xmm2 # qhasm: xmm3 = shuffle dwords of xmm14 by 0x93 # asm 1: pshufd $0x93,xmm3=int6464#4 # asm 2: pshufd $0x93,xmm3=%xmm3 pshufd $0x93,%xmm14,%xmm3 # qhasm: xmm4 = shuffle dwords of xmm11 by 0x93 # asm 1: pshufd $0x93,xmm4=int6464#5 # asm 2: pshufd $0x93,xmm4=%xmm4 pshufd $0x93,%xmm11,%xmm4 # qhasm: xmm5 = shuffle dwords of xmm15 by 0x93 # asm 1: pshufd $0x93,xmm5=int6464#6 # asm 2: pshufd $0x93,xmm5=%xmm5 pshufd $0x93,%xmm15,%xmm5 # qhasm: xmm6 = shuffle dwords of xmm10 by 0x93 # asm 1: pshufd $0x93,xmm6=int6464#7 # asm 2: pshufd $0x93,xmm6=%xmm6 pshufd $0x93,%xmm10,%xmm6 # qhasm: xmm7 = shuffle dwords of xmm13 by 0x93 # asm 1: pshufd $0x93,xmm7=int6464#8 # asm 2: pshufd $0x93,xmm7=%xmm7 pshufd $0x93,%xmm13,%xmm7 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm8=int6464#9 # asm 2: 
pshufd $0x4E,xmm8=%xmm8 pshufd $0x4E,%xmm8,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E # asm 1: pshufd $0x4E,xmm9=int6464#10 # asm 2: pshufd $0x4E,xmm9=%xmm9 pshufd $0x4E,%xmm9,%xmm9 # qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E # asm 1: pshufd $0x4E,xmm12=int6464#13 # asm 2: pshufd $0x4E,xmm12=%xmm12 pshufd $0x4E,%xmm12,%xmm12 # qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E # asm 1: pshufd $0x4E,xmm14=int6464#15 # asm 2: pshufd $0x4E,xmm14=%xmm14 pshufd $0x4E,%xmm14,%xmm14 # qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E # asm 1: pshufd $0x4E,xmm11=int6464#12 # asm 2: pshufd $0x4E,xmm11=%xmm11 pshufd $0x4E,%xmm11,%xmm11 # qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E # asm 1: pshufd $0x4E,xmm15=int6464#16 # asm 2: pshufd $0x4E,xmm15=%xmm15 pshufd $0x4E,%xmm15,%xmm15 # qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E # asm 1: pshufd $0x4E,xmm10=int6464#11 # asm 2: pshufd $0x4E,xmm10=%xmm10 pshufd $0x4E,%xmm10,%xmm10 # qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E # asm 1: pshufd $0x4E,xmm13=int6464#14 # asm 2: pshufd $0x4E,xmm13=%xmm13 pshufd $0x4E,%xmm13,%xmm13 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm11=int6464#9 # asm 2: movdqa xmm11=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm10 = xmm1 # asm 1: movdqa xmm10=int6464#10 # asm 2: movdqa xmm10=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm9 = xmm5 # asm 1: movdqa xmm9=int6464#11 # asm 2: movdqa xmm9=%xmm10 movdqa %xmm5,%xmm10 # qhasm: xmm13 = xmm2 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm2,%xmm11 # qhasm: xmm12 = xmm6 # asm 1: movdqa xmm12=int6464#13 # asm 2: movdqa xmm12=%xmm12 movdqa %xmm6,%xmm12 # qhasm: xmm11 ^= xmm4 # asm 1: pxor xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm8,%xmm13 # qhasm: xmm8 = xmm10 # asm 1: movdqa xmm8=int6464#15 # asm 2: movdqa xmm8=%xmm14 movdqa %xmm9,%xmm14 # qhasm: xmm15 = xmm11 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm8,%xmm15 # qhasm: xmm10 |= xmm9 # asm 1: por xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm3,%xmm10 # qhasm: xmm12 ^= xmm0 # asm 1: pxor xmm13=int6464#11 # asm 2: movdqa xmm13=%xmm10 movdqa %xmm7,%xmm10 # qhasm: xmm13 ^= xmm1 # asm 1: pxor xmm12=int6464#12 # asm 2: movdqa xmm12=%xmm11 movdqa %xmm5,%xmm11 # qhasm: xmm9 = xmm13 # asm 1: movdqa xmm9=int6464#13 # asm 2: movdqa xmm9=%xmm12 movdqa %xmm10,%xmm12 # qhasm: xmm12 ^= xmm6 # asm 1: pxor xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm2,%xmm10 # qhasm: xmm13 = xmm4 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm4,%xmm11 # qhasm: xmm14 = xmm1 # asm 1: movdqa xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm1,%xmm13 # qhasm: xmm15 = xmm7 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm7,%xmm15 # qhasm: xmm12 &= xmm3 # asm 1: pand xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm8,%xmm10 # qhasm: xmm12 ^= xmm10 # asm 1: pxor xmm14=int6464#12 # asm 2: movdqa xmm14=%xmm11 movdqa %xmm14,%xmm11 # qhasm: xmm14 ^= xmm11 # asm 1: pxor xmm15=int6464#14 # asm 2: movdqa xmm15=%xmm13 movdqa %xmm10,%xmm13 # qhasm: xmm15 &= xmm14 # asm 1: pand xmm13=int6464#16 # asm 2: movdqa xmm13=%xmm15 movdqa %xmm12,%xmm15 # qhasm: xmm13 ^= xmm8 # asm 1: pxor xmm10=int6464#9 # asm 2: movdqa xmm10=%xmm8 movdqa %xmm11,%xmm8 # qhasm: xmm10 ^= xmm13 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm6,%xmm8 # qhasm: xmm8 = xmm5 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm5,%xmm9 # qhasm: xmm10 = xmm15 # asm 1: movdqa xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa 
[Bundled NaCl library: qhasm-generated x86-64 assembly implementing bitsliced AES-128 in counter mode (apparently NaCl's crypto_stream/aes128ctr "core2" code). The archived text of this file is too badly mangled to restore instruction by instruction; only its overall layout is recoverable:
 - S-box layers computed as long movdqa/pxor/pand/por sequences over the eight bitsliced state registers xmm0-xmm7, with xmm8-xmm15 as temporaries;
 - ShiftRows/MixColumns-style linear layers built from pshufd $0x93 and pshufd $0x4E dword shuffles combined with XORs;
 - per-round key addition that loads 16-byte round-key slices from offsets 0 through 1264 of the expanded-key area addressed by c (%rdi);
 - bit-matrix transposition steps (psrlq $1/$2/$4 plus masked XORs) that convert between the normal and bitsliced representations;
 - a key-setup routine (arguments c and k) that byte-shuffles the user key with the M0 constant, bitslices it, and writes out the expanded round keys;
 - the counter-mode loop (enc_block), which keeps the nonce/counter block on the stack, byte-swaps it with SWAP32, steps the per-block counters with the RCTRINC constants, encrypts eight 16-byte blocks per iteration, and XORs the resulting keystream into the message, with a lensav/blp/outp tail path for a partial final block.]
pshufd $0x93,xmm9=%xmm9 pshufd $0x93,%xmm1,%xmm9 # qhasm: xmm10 = shuffle dwords of xmm4 by 0x93 # asm 1: pshufd $0x93,xmm10=int6464#11 # asm 2: pshufd $0x93,xmm10=%xmm10 pshufd $0x93,%xmm4,%xmm10 # qhasm: xmm11 = shuffle dwords of xmm6 by 0x93 # asm 1: pshufd $0x93,xmm11=int6464#12 # asm 2: pshufd $0x93,xmm11=%xmm11 pshufd $0x93,%xmm6,%xmm11 # qhasm: xmm12 = shuffle dwords of xmm3 by 0x93 # asm 1: pshufd $0x93,xmm12=int6464#13 # asm 2: pshufd $0x93,xmm12=%xmm12 pshufd $0x93,%xmm3,%xmm12 # qhasm: xmm13 = shuffle dwords of xmm7 by 0x93 # asm 1: pshufd $0x93,xmm13=int6464#14 # asm 2: pshufd $0x93,xmm13=%xmm13 pshufd $0x93,%xmm7,%xmm13 # qhasm: xmm14 = shuffle dwords of xmm2 by 0x93 # asm 1: pshufd $0x93,xmm14=int6464#15 # asm 2: pshufd $0x93,xmm14=%xmm14 pshufd $0x93,%xmm2,%xmm14 # qhasm: xmm15 = shuffle dwords of xmm5 by 0x93 # asm 1: pshufd $0x93,xmm15=int6464#16 # asm 2: pshufd $0x93,xmm15=%xmm15 pshufd $0x93,%xmm5,%xmm15 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x4E,xmm0=%xmm0 pshufd $0x4E,%xmm0,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E # asm 1: pshufd $0x4E,xmm1=int6464#2 # asm 2: pshufd $0x4E,xmm1=%xmm1 pshufd $0x4E,%xmm1,%xmm1 # qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E # asm 1: pshufd $0x4E,xmm4=int6464#5 # asm 2: pshufd $0x4E,xmm4=%xmm4 pshufd $0x4E,%xmm4,%xmm4 # qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E # asm 1: pshufd $0x4E,xmm6=int6464#7 # asm 2: pshufd $0x4E,xmm6=%xmm6 pshufd $0x4E,%xmm6,%xmm6 # qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E # asm 1: pshufd $0x4E,xmm3=int6464#4 # asm 2: pshufd $0x4E,xmm3=%xmm3 pshufd $0x4E,%xmm3,%xmm3 # qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E # asm 1: pshufd $0x4E,xmm7=int6464#8 # asm 2: pshufd $0x4E,xmm7=%xmm7 pshufd $0x4E,%xmm7,%xmm7 # qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E # asm 1: pshufd $0x4E,xmm2=int6464#3 # asm 2: pshufd $0x4E,xmm2=%xmm2 pshufd $0x4E,%xmm2,%xmm2 # qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E # asm 1: pshufd $0x4E,xmm5=int6464#6 # asm 2: pshufd $0x4E,xmm5=%xmm5 pshufd $0x4E,%xmm5,%xmm5 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm3=int6464#1 # asm 2: movdqa xmm3=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm2 = xmm9 # asm 1: movdqa xmm2=int6464#2 # asm 2: movdqa xmm2=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm1 = xmm13 # asm 1: movdqa xmm1=int6464#3 # asm 2: movdqa xmm1=%xmm2 movdqa %xmm13,%xmm2 # qhasm: xmm5 = xmm10 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm10,%xmm3 # qhasm: xmm4 = xmm14 # asm 1: movdqa xmm4=int6464#5 # asm 2: movdqa xmm4=%xmm4 movdqa %xmm14,%xmm4 # qhasm: xmm3 ^= xmm12 # asm 1: pxor xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm0,%xmm5 # qhasm: xmm0 = xmm2 # asm 1: movdqa xmm0=int6464#7 # asm 2: movdqa xmm0=%xmm6 movdqa %xmm1,%xmm6 # qhasm: xmm7 = xmm3 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm0,%xmm7 # qhasm: xmm2 |= xmm1 # asm 1: por xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm11,%xmm2 # qhasm: xmm4 ^= xmm8 # asm 1: pxor xmm5=int6464#3 # asm 2: movdqa xmm5=%xmm2 movdqa %xmm15,%xmm2 # qhasm: xmm5 ^= xmm9 # asm 1: pxor xmm4=int6464#4 # asm 2: movdqa xmm4=%xmm3 movdqa %xmm13,%xmm3 # qhasm: xmm1 = xmm5 # asm 1: movdqa xmm1=int6464#5 # asm 2: movdqa xmm1=%xmm4 movdqa %xmm2,%xmm4 # qhasm: xmm4 ^= xmm14 # asm 1: pxor xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm10,%xmm2 # qhasm: xmm5 = xmm12 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm12,%xmm3 # qhasm: xmm6 = xmm9 # asm 1: movdqa xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm9,%xmm5 # qhasm: xmm7 = xmm15 # asm 1: 
movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm15,%xmm7 # qhasm: xmm4 &= xmm11 # asm 1: pand xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm0,%xmm2 # qhasm: xmm4 ^= xmm2 # asm 1: pxor xmm6=int6464#4 # asm 2: movdqa xmm6=%xmm3 movdqa %xmm6,%xmm3 # qhasm: xmm6 ^= xmm3 # asm 1: pxor xmm7=int6464#6 # asm 2: movdqa xmm7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: xmm7 &= xmm6 # asm 1: pand xmm5=int6464#8 # asm 2: movdqa xmm5=%xmm7 movdqa %xmm4,%xmm7 # qhasm: xmm5 ^= xmm0 # asm 1: pxor xmm2=int6464#1 # asm 2: movdqa xmm2=%xmm0 movdqa %xmm3,%xmm0 # qhasm: xmm2 ^= xmm5 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm14,%xmm0 # qhasm: xmm0 = xmm13 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm13,%xmm1 # qhasm: xmm2 = xmm7 # asm 1: movdqa xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm2 ^= xmm6 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm0 = xmm9 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm4 ^= xmm12 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x93,xmm0=%xmm0 pshufd $0x93,%xmm8,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm9 by 0x93 # asm 1: pshufd $0x93,xmm1=int6464#2 # asm 2: pshufd $0x93,xmm1=%xmm1 pshufd $0x93,%xmm9,%xmm1 # qhasm: xmm2 = shuffle dwords of xmm12 by 0x93 # asm 1: pshufd $0x93,xmm2=int6464#3 # asm 2: pshufd $0x93,xmm2=%xmm2 pshufd $0x93,%xmm12,%xmm2 # qhasm: xmm3 = shuffle dwords of xmm14 by 0x93 # asm 1: pshufd $0x93,xmm3=int6464#4 # asm 2: pshufd $0x93,xmm3=%xmm3 pshufd $0x93,%xmm14,%xmm3 # qhasm: xmm4 = shuffle dwords of xmm11 by 0x93 # asm 1: pshufd $0x93,xmm4=int6464#5 # asm 2: pshufd $0x93,xmm4=%xmm4 pshufd $0x93,%xmm11,%xmm4 # qhasm: xmm5 = shuffle dwords of xmm15 by 0x93 # asm 1: pshufd $0x93,xmm5=int6464#6 # asm 2: pshufd $0x93,xmm5=%xmm5 pshufd $0x93,%xmm15,%xmm5 # qhasm: xmm6 = shuffle dwords of xmm10 by 0x93 # asm 1: pshufd $0x93,xmm6=int6464#7 # asm 2: pshufd $0x93,xmm6=%xmm6 pshufd $0x93,%xmm10,%xmm6 # qhasm: xmm7 = shuffle dwords of xmm13 by 0x93 # asm 1: pshufd $0x93,xmm7=int6464#8 # asm 2: pshufd $0x93,xmm7=%xmm7 pshufd $0x93,%xmm13,%xmm7 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x4E,xmm8=%xmm8 pshufd $0x4E,%xmm8,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E # asm 1: pshufd $0x4E,xmm9=int6464#10 # asm 2: pshufd $0x4E,xmm9=%xmm9 pshufd $0x4E,%xmm9,%xmm9 # qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E # asm 1: pshufd $0x4E,xmm12=int6464#13 # asm 2: pshufd $0x4E,xmm12=%xmm12 pshufd $0x4E,%xmm12,%xmm12 # qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E # asm 1: pshufd $0x4E,xmm14=int6464#15 # asm 2: pshufd $0x4E,xmm14=%xmm14 pshufd $0x4E,%xmm14,%xmm14 # qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E # asm 1: pshufd $0x4E,xmm11=int6464#12 # asm 2: pshufd $0x4E,xmm11=%xmm11 pshufd $0x4E,%xmm11,%xmm11 # qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E # asm 1: pshufd $0x4E,xmm15=int6464#16 # asm 2: pshufd $0x4E,xmm15=%xmm15 pshufd $0x4E,%xmm15,%xmm15 # qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E # asm 
1: pshufd $0x4E,xmm10=int6464#11 # asm 2: pshufd $0x4E,xmm10=%xmm10 pshufd $0x4E,%xmm10,%xmm10 # qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E # asm 1: pshufd $0x4E,xmm13=int6464#14 # asm 2: pshufd $0x4E,xmm13=%xmm13 pshufd $0x4E,%xmm13,%xmm13 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm11=int6464#9 # asm 2: movdqa xmm11=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm10 = xmm1 # asm 1: movdqa xmm10=int6464#10 # asm 2: movdqa xmm10=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm9 = xmm5 # asm 1: movdqa xmm9=int6464#11 # asm 2: movdqa xmm9=%xmm10 movdqa %xmm5,%xmm10 # qhasm: xmm13 = xmm2 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm2,%xmm11 # qhasm: xmm12 = xmm6 # asm 1: movdqa xmm12=int6464#13 # asm 2: movdqa xmm12=%xmm12 movdqa %xmm6,%xmm12 # qhasm: xmm11 ^= xmm4 # asm 1: pxor xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm8,%xmm13 # qhasm: xmm8 = xmm10 # asm 1: movdqa xmm8=int6464#15 # asm 2: movdqa xmm8=%xmm14 movdqa %xmm9,%xmm14 # qhasm: xmm15 = xmm11 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm8,%xmm15 # qhasm: xmm10 |= xmm9 # asm 1: por xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm3,%xmm10 # qhasm: xmm12 ^= xmm0 # asm 1: pxor xmm13=int6464#11 # asm 2: movdqa xmm13=%xmm10 movdqa %xmm7,%xmm10 # qhasm: xmm13 ^= xmm1 # asm 1: pxor xmm12=int6464#12 # asm 2: movdqa xmm12=%xmm11 movdqa %xmm5,%xmm11 # qhasm: xmm9 = xmm13 # asm 1: movdqa xmm9=int6464#13 # asm 2: movdqa xmm9=%xmm12 movdqa %xmm10,%xmm12 # qhasm: xmm12 ^= xmm6 # asm 1: pxor xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm2,%xmm10 # qhasm: xmm13 = xmm4 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm4,%xmm11 # qhasm: xmm14 = xmm1 # asm 1: movdqa xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm1,%xmm13 # qhasm: xmm15 = xmm7 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm7,%xmm15 # qhasm: xmm12 &= xmm3 # asm 1: pand xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm8,%xmm10 # qhasm: xmm12 ^= xmm10 # asm 1: pxor xmm14=int6464#12 # asm 2: movdqa xmm14=%xmm11 movdqa %xmm14,%xmm11 # qhasm: xmm14 ^= xmm11 # asm 1: pxor xmm15=int6464#14 # asm 2: movdqa xmm15=%xmm13 movdqa %xmm10,%xmm13 # qhasm: xmm15 &= xmm14 # asm 1: pand xmm13=int6464#16 # asm 2: movdqa xmm13=%xmm15 movdqa %xmm12,%xmm15 # qhasm: xmm13 ^= xmm8 # asm 1: pxor xmm10=int6464#9 # asm 2: movdqa xmm10=%xmm8 movdqa %xmm11,%xmm8 # qhasm: xmm10 ^= xmm13 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm6,%xmm8 # qhasm: xmm8 = xmm5 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm5,%xmm9 # qhasm: xmm10 = xmm15 # asm 1: movdqa xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm10 ^= xmm14 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm8 = xmm1 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm12 ^= xmm4 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x93,xmm8=%xmm8 pshufd $0x93,%xmm0,%xmm8 # 
qhasm: xmm9 = shuffle dwords of xmm1 by 0x93 # asm 1: pshufd $0x93,xmm9=int6464#10 # asm 2: pshufd $0x93,xmm9=%xmm9 pshufd $0x93,%xmm1,%xmm9 # qhasm: xmm10 = shuffle dwords of xmm4 by 0x93 # asm 1: pshufd $0x93,xmm10=int6464#11 # asm 2: pshufd $0x93,xmm10=%xmm10 pshufd $0x93,%xmm4,%xmm10 # qhasm: xmm11 = shuffle dwords of xmm6 by 0x93 # asm 1: pshufd $0x93,xmm11=int6464#12 # asm 2: pshufd $0x93,xmm11=%xmm11 pshufd $0x93,%xmm6,%xmm11 # qhasm: xmm12 = shuffle dwords of xmm3 by 0x93 # asm 1: pshufd $0x93,xmm12=int6464#13 # asm 2: pshufd $0x93,xmm12=%xmm12 pshufd $0x93,%xmm3,%xmm12 # qhasm: xmm13 = shuffle dwords of xmm7 by 0x93 # asm 1: pshufd $0x93,xmm13=int6464#14 # asm 2: pshufd $0x93,xmm13=%xmm13 pshufd $0x93,%xmm7,%xmm13 # qhasm: xmm14 = shuffle dwords of xmm2 by 0x93 # asm 1: pshufd $0x93,xmm14=int6464#15 # asm 2: pshufd $0x93,xmm14=%xmm14 pshufd $0x93,%xmm2,%xmm14 # qhasm: xmm15 = shuffle dwords of xmm5 by 0x93 # asm 1: pshufd $0x93,xmm15=int6464#16 # asm 2: pshufd $0x93,xmm15=%xmm15 pshufd $0x93,%xmm5,%xmm15 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x4E,xmm0=%xmm0 pshufd $0x4E,%xmm0,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E # asm 1: pshufd $0x4E,xmm1=int6464#2 # asm 2: pshufd $0x4E,xmm1=%xmm1 pshufd $0x4E,%xmm1,%xmm1 # qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E # asm 1: pshufd $0x4E,xmm4=int6464#5 # asm 2: pshufd $0x4E,xmm4=%xmm4 pshufd $0x4E,%xmm4,%xmm4 # qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E # asm 1: pshufd $0x4E,xmm6=int6464#7 # asm 2: pshufd $0x4E,xmm6=%xmm6 pshufd $0x4E,%xmm6,%xmm6 # qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E # asm 1: pshufd $0x4E,xmm3=int6464#4 # asm 2: pshufd $0x4E,xmm3=%xmm3 pshufd $0x4E,%xmm3,%xmm3 # qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E # asm 1: pshufd $0x4E,xmm7=int6464#8 # asm 2: pshufd $0x4E,xmm7=%xmm7 pshufd $0x4E,%xmm7,%xmm7 # qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E # asm 1: pshufd $0x4E,xmm2=int6464#3 # asm 2: pshufd $0x4E,xmm2=%xmm2 pshufd $0x4E,%xmm2,%xmm2 # qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E # asm 1: pshufd $0x4E,xmm5=int6464#6 # asm 2: pshufd $0x4E,xmm5=%xmm5 pshufd $0x4E,%xmm5,%xmm5 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm3=int6464#1 # asm 2: movdqa xmm3=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm2 = xmm9 # asm 1: movdqa xmm2=int6464#2 # asm 2: movdqa xmm2=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm1 = xmm13 # asm 1: movdqa xmm1=int6464#3 # asm 2: movdqa xmm1=%xmm2 movdqa %xmm13,%xmm2 # qhasm: xmm5 = xmm10 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm10,%xmm3 # qhasm: xmm4 = xmm14 # asm 1: movdqa xmm4=int6464#5 # asm 2: movdqa xmm4=%xmm4 movdqa %xmm14,%xmm4 # qhasm: xmm3 ^= xmm12 # asm 1: pxor xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm0,%xmm5 # qhasm: xmm0 = xmm2 # asm 1: movdqa xmm0=int6464#7 # asm 2: movdqa xmm0=%xmm6 movdqa %xmm1,%xmm6 # qhasm: xmm7 = xmm3 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm0,%xmm7 # qhasm: xmm2 |= xmm1 # asm 1: por xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm11,%xmm2 # qhasm: xmm4 ^= xmm8 # asm 1: pxor xmm5=int6464#3 # asm 2: movdqa xmm5=%xmm2 movdqa %xmm15,%xmm2 # qhasm: xmm5 ^= xmm9 # asm 1: pxor xmm4=int6464#4 # asm 2: movdqa xmm4=%xmm3 movdqa %xmm13,%xmm3 # qhasm: xmm1 = xmm5 # asm 1: movdqa xmm1=int6464#5 # asm 2: movdqa xmm1=%xmm4 movdqa %xmm2,%xmm4 # qhasm: xmm4 ^= xmm14 # asm 1: pxor xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm10,%xmm2 # qhasm: xmm5 = xmm12 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm12,%xmm3 # qhasm: xmm6 = xmm9 # asm 1: movdqa 
xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm9,%xmm5 # qhasm: xmm7 = xmm15 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm15,%xmm7 # qhasm: xmm4 &= xmm11 # asm 1: pand xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm0,%xmm2 # qhasm: xmm4 ^= xmm2 # asm 1: pxor xmm6=int6464#4 # asm 2: movdqa xmm6=%xmm3 movdqa %xmm6,%xmm3 # qhasm: xmm6 ^= xmm3 # asm 1: pxor xmm7=int6464#6 # asm 2: movdqa xmm7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: xmm7 &= xmm6 # asm 1: pand xmm5=int6464#8 # asm 2: movdqa xmm5=%xmm7 movdqa %xmm4,%xmm7 # qhasm: xmm5 ^= xmm0 # asm 1: pxor xmm2=int6464#1 # asm 2: movdqa xmm2=%xmm0 movdqa %xmm3,%xmm0 # qhasm: xmm2 ^= xmm5 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm14,%xmm0 # qhasm: xmm0 = xmm13 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm13,%xmm1 # qhasm: xmm2 = xmm7 # asm 1: movdqa xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm2 ^= xmm6 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm0 = xmm9 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm4 ^= xmm12 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x93,xmm0=%xmm0 pshufd $0x93,%xmm8,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm9 by 0x93 # asm 1: pshufd $0x93,xmm1=int6464#2 # asm 2: pshufd $0x93,xmm1=%xmm1 pshufd $0x93,%xmm9,%xmm1 # qhasm: xmm2 = shuffle dwords of xmm12 by 0x93 # asm 1: pshufd $0x93,xmm2=int6464#3 # asm 2: pshufd $0x93,xmm2=%xmm2 pshufd $0x93,%xmm12,%xmm2 # qhasm: xmm3 = shuffle dwords of xmm14 by 0x93 # asm 1: pshufd $0x93,xmm3=int6464#4 # asm 2: pshufd $0x93,xmm3=%xmm3 pshufd $0x93,%xmm14,%xmm3 # qhasm: xmm4 = shuffle dwords of xmm11 by 0x93 # asm 1: pshufd $0x93,xmm4=int6464#5 # asm 2: pshufd $0x93,xmm4=%xmm4 pshufd $0x93,%xmm11,%xmm4 # qhasm: xmm5 = shuffle dwords of xmm15 by 0x93 # asm 1: pshufd $0x93,xmm5=int6464#6 # asm 2: pshufd $0x93,xmm5=%xmm5 pshufd $0x93,%xmm15,%xmm5 # qhasm: xmm6 = shuffle dwords of xmm10 by 0x93 # asm 1: pshufd $0x93,xmm6=int6464#7 # asm 2: pshufd $0x93,xmm6=%xmm6 pshufd $0x93,%xmm10,%xmm6 # qhasm: xmm7 = shuffle dwords of xmm13 by 0x93 # asm 1: pshufd $0x93,xmm7=int6464#8 # asm 2: pshufd $0x93,xmm7=%xmm7 pshufd $0x93,%xmm13,%xmm7 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x4E,xmm8=%xmm8 pshufd $0x4E,%xmm8,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E # asm 1: pshufd $0x4E,xmm9=int6464#10 # asm 2: pshufd $0x4E,xmm9=%xmm9 pshufd $0x4E,%xmm9,%xmm9 # qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E # asm 1: pshufd $0x4E,xmm12=int6464#13 # asm 2: pshufd $0x4E,xmm12=%xmm12 pshufd $0x4E,%xmm12,%xmm12 # qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E # asm 1: pshufd $0x4E,xmm14=int6464#15 # asm 2: pshufd $0x4E,xmm14=%xmm14 pshufd $0x4E,%xmm14,%xmm14 # qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E # asm 1: pshufd $0x4E,xmm11=int6464#12 # asm 2: pshufd $0x4E,xmm11=%xmm11 pshufd $0x4E,%xmm11,%xmm11 # qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E # asm 1: pshufd $0x4E,xmm15=int6464#16 # asm 2: pshufd 
$0x4E,xmm15=%xmm15 pshufd $0x4E,%xmm15,%xmm15 # qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E # asm 1: pshufd $0x4E,xmm10=int6464#11 # asm 2: pshufd $0x4E,xmm10=%xmm10 pshufd $0x4E,%xmm10,%xmm10 # qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E # asm 1: pshufd $0x4E,xmm13=int6464#14 # asm 2: pshufd $0x4E,xmm13=%xmm13 pshufd $0x4E,%xmm13,%xmm13 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm11=int6464#9 # asm 2: movdqa xmm11=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm10 = xmm1 # asm 1: movdqa xmm10=int6464#10 # asm 2: movdqa xmm10=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm9 = xmm5 # asm 1: movdqa xmm9=int6464#11 # asm 2: movdqa xmm9=%xmm10 movdqa %xmm5,%xmm10 # qhasm: xmm13 = xmm2 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm2,%xmm11 # qhasm: xmm12 = xmm6 # asm 1: movdqa xmm12=int6464#13 # asm 2: movdqa xmm12=%xmm12 movdqa %xmm6,%xmm12 # qhasm: xmm11 ^= xmm4 # asm 1: pxor xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm8,%xmm13 # qhasm: xmm8 = xmm10 # asm 1: movdqa xmm8=int6464#15 # asm 2: movdqa xmm8=%xmm14 movdqa %xmm9,%xmm14 # qhasm: xmm15 = xmm11 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm8,%xmm15 # qhasm: xmm10 |= xmm9 # asm 1: por xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm3,%xmm10 # qhasm: xmm12 ^= xmm0 # asm 1: pxor xmm13=int6464#11 # asm 2: movdqa xmm13=%xmm10 movdqa %xmm7,%xmm10 # qhasm: xmm13 ^= xmm1 # asm 1: pxor xmm12=int6464#12 # asm 2: movdqa xmm12=%xmm11 movdqa %xmm5,%xmm11 # qhasm: xmm9 = xmm13 # asm 1: movdqa xmm9=int6464#13 # asm 2: movdqa xmm9=%xmm12 movdqa %xmm10,%xmm12 # qhasm: xmm12 ^= xmm6 # asm 1: pxor xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm2,%xmm10 # qhasm: xmm13 = xmm4 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm4,%xmm11 # qhasm: xmm14 = xmm1 # asm 1: movdqa xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm1,%xmm13 # qhasm: xmm15 = xmm7 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm7,%xmm15 # qhasm: xmm12 &= xmm3 # asm 1: pand xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm8,%xmm10 # qhasm: xmm12 ^= xmm10 # asm 1: pxor xmm14=int6464#12 # asm 2: movdqa xmm14=%xmm11 movdqa %xmm14,%xmm11 # qhasm: xmm14 ^= xmm11 # asm 1: pxor xmm15=int6464#14 # asm 2: movdqa xmm15=%xmm13 movdqa %xmm10,%xmm13 # qhasm: xmm15 &= xmm14 # asm 1: pand xmm13=int6464#16 # asm 2: movdqa xmm13=%xmm15 movdqa %xmm12,%xmm15 # qhasm: xmm13 ^= xmm8 # asm 1: pxor xmm10=int6464#9 # asm 2: movdqa xmm10=%xmm8 movdqa %xmm11,%xmm8 # qhasm: xmm10 ^= xmm13 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm6,%xmm8 # qhasm: xmm8 = xmm5 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm5,%xmm9 # qhasm: xmm10 = xmm15 # asm 1: movdqa xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm10 ^= xmm14 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm8 = xmm1 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm12 ^= xmm4 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 
^= xmm14 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x93,xmm8=%xmm8 pshufd $0x93,%xmm0,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm1 by 0x93 # asm 1: pshufd $0x93,xmm9=int6464#10 # asm 2: pshufd $0x93,xmm9=%xmm9 pshufd $0x93,%xmm1,%xmm9 # qhasm: xmm10 = shuffle dwords of xmm4 by 0x93 # asm 1: pshufd $0x93,xmm10=int6464#11 # asm 2: pshufd $0x93,xmm10=%xmm10 pshufd $0x93,%xmm4,%xmm10 # qhasm: xmm11 = shuffle dwords of xmm6 by 0x93 # asm 1: pshufd $0x93,xmm11=int6464#12 # asm 2: pshufd $0x93,xmm11=%xmm11 pshufd $0x93,%xmm6,%xmm11 # qhasm: xmm12 = shuffle dwords of xmm3 by 0x93 # asm 1: pshufd $0x93,xmm12=int6464#13 # asm 2: pshufd $0x93,xmm12=%xmm12 pshufd $0x93,%xmm3,%xmm12 # qhasm: xmm13 = shuffle dwords of xmm7 by 0x93 # asm 1: pshufd $0x93,xmm13=int6464#14 # asm 2: pshufd $0x93,xmm13=%xmm13 pshufd $0x93,%xmm7,%xmm13 # qhasm: xmm14 = shuffle dwords of xmm2 by 0x93 # asm 1: pshufd $0x93,xmm14=int6464#15 # asm 2: pshufd $0x93,xmm14=%xmm14 pshufd $0x93,%xmm2,%xmm14 # qhasm: xmm15 = shuffle dwords of xmm5 by 0x93 # asm 1: pshufd $0x93,xmm15=int6464#16 # asm 2: pshufd $0x93,xmm15=%xmm15 pshufd $0x93,%xmm5,%xmm15 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x4E,xmm0=%xmm0 pshufd $0x4E,%xmm0,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E # asm 1: pshufd $0x4E,xmm1=int6464#2 # asm 2: pshufd $0x4E,xmm1=%xmm1 pshufd $0x4E,%xmm1,%xmm1 # qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E # asm 1: pshufd $0x4E,xmm4=int6464#5 # asm 2: pshufd $0x4E,xmm4=%xmm4 pshufd $0x4E,%xmm4,%xmm4 # qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E # asm 1: pshufd $0x4E,xmm6=int6464#7 # asm 2: pshufd $0x4E,xmm6=%xmm6 pshufd $0x4E,%xmm6,%xmm6 # qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E # asm 1: pshufd $0x4E,xmm3=int6464#4 # asm 2: pshufd $0x4E,xmm3=%xmm3 pshufd $0x4E,%xmm3,%xmm3 # qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E # asm 1: pshufd $0x4E,xmm7=int6464#8 # asm 2: pshufd $0x4E,xmm7=%xmm7 pshufd $0x4E,%xmm7,%xmm7 # qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E # asm 1: pshufd $0x4E,xmm2=int6464#3 # asm 2: pshufd $0x4E,xmm2=%xmm2 pshufd $0x4E,%xmm2,%xmm2 # qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E # asm 1: pshufd $0x4E,xmm5=int6464#6 # asm 2: pshufd $0x4E,xmm5=%xmm5 pshufd $0x4E,%xmm5,%xmm5 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm3=int6464#1 # asm 2: movdqa xmm3=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm2 = xmm9 # asm 1: movdqa xmm2=int6464#2 # asm 2: movdqa xmm2=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm1 = xmm13 # asm 1: movdqa xmm1=int6464#3 # asm 2: movdqa xmm1=%xmm2 movdqa %xmm13,%xmm2 # qhasm: xmm5 = xmm10 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm10,%xmm3 # qhasm: xmm4 = xmm14 # asm 1: movdqa xmm4=int6464#5 # asm 2: movdqa xmm4=%xmm4 movdqa %xmm14,%xmm4 # qhasm: xmm3 ^= xmm12 # asm 1: pxor xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm0,%xmm5 # qhasm: xmm0 = xmm2 # asm 1: movdqa xmm0=int6464#7 # asm 2: movdqa xmm0=%xmm6 movdqa %xmm1,%xmm6 # qhasm: xmm7 = xmm3 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm0,%xmm7 # qhasm: xmm2 |= xmm1 # asm 1: por xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm11,%xmm2 # qhasm: xmm4 ^= xmm8 # asm 1: pxor xmm5=int6464#3 # asm 2: movdqa xmm5=%xmm2 movdqa %xmm15,%xmm2 # qhasm: xmm5 ^= xmm9 # asm 1: pxor xmm4=int6464#4 # asm 2: movdqa xmm4=%xmm3 movdqa %xmm13,%xmm3 # qhasm: xmm1 = xmm5 # asm 1: movdqa xmm1=int6464#5 # asm 2: movdqa xmm1=%xmm4 movdqa %xmm2,%xmm4 # qhasm: xmm4 ^= xmm14 # asm 1: pxor xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm10,%xmm2 # qhasm: xmm5 = xmm12 # asm 1: movdqa 
xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm12,%xmm3 # qhasm: xmm6 = xmm9 # asm 1: movdqa xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm9,%xmm5 # qhasm: xmm7 = xmm15 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm15,%xmm7 # qhasm: xmm4 &= xmm11 # asm 1: pand xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm0,%xmm2 # qhasm: xmm4 ^= xmm2 # asm 1: pxor xmm6=int6464#4 # asm 2: movdqa xmm6=%xmm3 movdqa %xmm6,%xmm3 # qhasm: xmm6 ^= xmm3 # asm 1: pxor xmm7=int6464#6 # asm 2: movdqa xmm7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: xmm7 &= xmm6 # asm 1: pand xmm5=int6464#8 # asm 2: movdqa xmm5=%xmm7 movdqa %xmm4,%xmm7 # qhasm: xmm5 ^= xmm0 # asm 1: pxor xmm2=int6464#1 # asm 2: movdqa xmm2=%xmm0 movdqa %xmm3,%xmm0 # qhasm: xmm2 ^= xmm5 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm14,%xmm0 # qhasm: xmm0 = xmm13 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm13,%xmm1 # qhasm: xmm2 = xmm7 # asm 1: movdqa xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm2 ^= xmm6 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm0 = xmm9 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm4 ^= xmm12 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x93,xmm0=%xmm0 pshufd $0x93,%xmm8,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm9 by 0x93 # asm 1: pshufd $0x93,xmm1=int6464#2 # asm 2: pshufd $0x93,xmm1=%xmm1 pshufd $0x93,%xmm9,%xmm1 # qhasm: xmm2 = shuffle dwords of xmm12 by 0x93 # asm 1: pshufd $0x93,xmm2=int6464#3 # asm 2: pshufd $0x93,xmm2=%xmm2 pshufd $0x93,%xmm12,%xmm2 # qhasm: xmm3 = shuffle dwords of xmm14 by 0x93 # asm 1: pshufd $0x93,xmm3=int6464#4 # asm 2: pshufd $0x93,xmm3=%xmm3 pshufd $0x93,%xmm14,%xmm3 # qhasm: xmm4 = shuffle dwords of xmm11 by 0x93 # asm 1: pshufd $0x93,xmm4=int6464#5 # asm 2: pshufd $0x93,xmm4=%xmm4 pshufd $0x93,%xmm11,%xmm4 # qhasm: xmm5 = shuffle dwords of xmm15 by 0x93 # asm 1: pshufd $0x93,xmm5=int6464#6 # asm 2: pshufd $0x93,xmm5=%xmm5 pshufd $0x93,%xmm15,%xmm5 # qhasm: xmm6 = shuffle dwords of xmm10 by 0x93 # asm 1: pshufd $0x93,xmm6=int6464#7 # asm 2: pshufd $0x93,xmm6=%xmm6 pshufd $0x93,%xmm10,%xmm6 # qhasm: xmm7 = shuffle dwords of xmm13 by 0x93 # asm 1: pshufd $0x93,xmm7=int6464#8 # asm 2: pshufd $0x93,xmm7=%xmm7 pshufd $0x93,%xmm13,%xmm7 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x4E,xmm8=%xmm8 pshufd $0x4E,%xmm8,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E # asm 1: pshufd $0x4E,xmm9=int6464#10 # asm 2: pshufd $0x4E,xmm9=%xmm9 pshufd $0x4E,%xmm9,%xmm9 # qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E # asm 1: pshufd $0x4E,xmm12=int6464#13 # asm 2: pshufd $0x4E,xmm12=%xmm12 pshufd $0x4E,%xmm12,%xmm12 # qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E # asm 1: pshufd $0x4E,xmm14=int6464#15 # asm 2: pshufd $0x4E,xmm14=%xmm14 pshufd $0x4E,%xmm14,%xmm14 # qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E # asm 1: pshufd $0x4E,xmm11=int6464#12 # asm 2: pshufd $0x4E,xmm11=%xmm11 pshufd $0x4E,%xmm11,%xmm11 # qhasm: 
xmm15 = shuffle dwords of xmm15 by 0x4E # asm 1: pshufd $0x4E,xmm15=int6464#16 # asm 2: pshufd $0x4E,xmm15=%xmm15 pshufd $0x4E,%xmm15,%xmm15 # qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E # asm 1: pshufd $0x4E,xmm10=int6464#11 # asm 2: pshufd $0x4E,xmm10=%xmm10 pshufd $0x4E,%xmm10,%xmm10 # qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E # asm 1: pshufd $0x4E,xmm13=int6464#14 # asm 2: pshufd $0x4E,xmm13=%xmm13 pshufd $0x4E,%xmm13,%xmm13 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm11=int6464#9 # asm 2: movdqa xmm11=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm10 = xmm1 # asm 1: movdqa xmm10=int6464#10 # asm 2: movdqa xmm10=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm9 = xmm5 # asm 1: movdqa xmm9=int6464#11 # asm 2: movdqa xmm9=%xmm10 movdqa %xmm5,%xmm10 # qhasm: xmm13 = xmm2 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm2,%xmm11 # qhasm: xmm12 = xmm6 # asm 1: movdqa xmm12=int6464#13 # asm 2: movdqa xmm12=%xmm12 movdqa %xmm6,%xmm12 # qhasm: xmm11 ^= xmm4 # asm 1: pxor xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm8,%xmm13 # qhasm: xmm8 = xmm10 # asm 1: movdqa xmm8=int6464#15 # asm 2: movdqa xmm8=%xmm14 movdqa %xmm9,%xmm14 # qhasm: xmm15 = xmm11 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm8,%xmm15 # qhasm: xmm10 |= xmm9 # asm 1: por xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm3,%xmm10 # qhasm: xmm12 ^= xmm0 # asm 1: pxor xmm13=int6464#11 # asm 2: movdqa xmm13=%xmm10 movdqa %xmm7,%xmm10 # qhasm: xmm13 ^= xmm1 # asm 1: pxor xmm12=int6464#12 # asm 2: movdqa xmm12=%xmm11 movdqa %xmm5,%xmm11 # qhasm: xmm9 = xmm13 # asm 1: movdqa xmm9=int6464#13 # asm 2: movdqa xmm9=%xmm12 movdqa %xmm10,%xmm12 # qhasm: xmm12 ^= xmm6 # asm 1: pxor xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm2,%xmm10 # qhasm: xmm13 = xmm4 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm4,%xmm11 # qhasm: xmm14 = xmm1 # asm 1: movdqa xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm1,%xmm13 # qhasm: xmm15 = xmm7 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm7,%xmm15 # qhasm: xmm12 &= xmm3 # asm 1: pand xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm8,%xmm10 # qhasm: xmm12 ^= xmm10 # asm 1: pxor xmm14=int6464#12 # asm 2: movdqa xmm14=%xmm11 movdqa %xmm14,%xmm11 # qhasm: xmm14 ^= xmm11 # asm 1: pxor xmm15=int6464#14 # asm 2: movdqa xmm15=%xmm13 movdqa %xmm10,%xmm13 # qhasm: xmm15 &= xmm14 # asm 1: pand xmm13=int6464#16 # asm 2: movdqa xmm13=%xmm15 movdqa %xmm12,%xmm15 # qhasm: xmm13 ^= xmm8 # asm 1: pxor xmm10=int6464#9 # asm 2: movdqa xmm10=%xmm8 movdqa %xmm11,%xmm8 # qhasm: xmm10 ^= xmm13 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm6,%xmm8 # qhasm: xmm8 = xmm5 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm5,%xmm9 # qhasm: xmm10 = xmm15 # asm 1: movdqa xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm10 ^= xmm14 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm8 = xmm1 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm12 ^= xmm4 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # 
asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x93,xmm8=%xmm8 pshufd $0x93,%xmm0,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm1 by 0x93 # asm 1: pshufd $0x93,xmm9=int6464#10 # asm 2: pshufd $0x93,xmm9=%xmm9 pshufd $0x93,%xmm1,%xmm9 # qhasm: xmm10 = shuffle dwords of xmm4 by 0x93 # asm 1: pshufd $0x93,xmm10=int6464#11 # asm 2: pshufd $0x93,xmm10=%xmm10 pshufd $0x93,%xmm4,%xmm10 # qhasm: xmm11 = shuffle dwords of xmm6 by 0x93 # asm 1: pshufd $0x93,xmm11=int6464#12 # asm 2: pshufd $0x93,xmm11=%xmm11 pshufd $0x93,%xmm6,%xmm11 # qhasm: xmm12 = shuffle dwords of xmm3 by 0x93 # asm 1: pshufd $0x93,xmm12=int6464#13 # asm 2: pshufd $0x93,xmm12=%xmm12 pshufd $0x93,%xmm3,%xmm12 # qhasm: xmm13 = shuffle dwords of xmm7 by 0x93 # asm 1: pshufd $0x93,xmm13=int6464#14 # asm 2: pshufd $0x93,xmm13=%xmm13 pshufd $0x93,%xmm7,%xmm13 # qhasm: xmm14 = shuffle dwords of xmm2 by 0x93 # asm 1: pshufd $0x93,xmm14=int6464#15 # asm 2: pshufd $0x93,xmm14=%xmm14 pshufd $0x93,%xmm2,%xmm14 # qhasm: xmm15 = shuffle dwords of xmm5 by 0x93 # asm 1: pshufd $0x93,xmm15=int6464#16 # asm 2: pshufd $0x93,xmm15=%xmm15 pshufd $0x93,%xmm5,%xmm15 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x4E,xmm0=%xmm0 pshufd $0x4E,%xmm0,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E # asm 1: pshufd $0x4E,xmm1=int6464#2 # asm 2: pshufd $0x4E,xmm1=%xmm1 pshufd $0x4E,%xmm1,%xmm1 # qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E # asm 1: pshufd $0x4E,xmm4=int6464#5 # asm 2: pshufd $0x4E,xmm4=%xmm4 pshufd $0x4E,%xmm4,%xmm4 # qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E # asm 1: pshufd $0x4E,xmm6=int6464#7 # asm 2: pshufd $0x4E,xmm6=%xmm6 pshufd $0x4E,%xmm6,%xmm6 # qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E # asm 1: pshufd $0x4E,xmm3=int6464#4 # asm 2: pshufd $0x4E,xmm3=%xmm3 pshufd $0x4E,%xmm3,%xmm3 # qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E # asm 1: pshufd $0x4E,xmm7=int6464#8 # asm 2: pshufd $0x4E,xmm7=%xmm7 pshufd $0x4E,%xmm7,%xmm7 # qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E # asm 1: pshufd $0x4E,xmm2=int6464#3 # asm 2: pshufd $0x4E,xmm2=%xmm2 pshufd $0x4E,%xmm2,%xmm2 # qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E # asm 1: pshufd $0x4E,xmm5=int6464#6 # asm 2: pshufd $0x4E,xmm5=%xmm5 pshufd $0x4E,%xmm5,%xmm5 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm3=int6464#1 # asm 2: movdqa xmm3=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm2 = xmm9 # asm 1: movdqa xmm2=int6464#2 # asm 2: movdqa xmm2=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm1 = xmm13 # asm 1: movdqa xmm1=int6464#3 # asm 2: movdqa xmm1=%xmm2 movdqa %xmm13,%xmm2 # qhasm: xmm5 = xmm10 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm10,%xmm3 # qhasm: xmm4 = xmm14 # asm 1: movdqa xmm4=int6464#5 # asm 2: movdqa xmm4=%xmm4 movdqa %xmm14,%xmm4 # qhasm: xmm3 ^= xmm12 # asm 1: pxor xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm0,%xmm5 # qhasm: xmm0 = xmm2 # asm 1: movdqa xmm0=int6464#7 # asm 2: movdqa xmm0=%xmm6 movdqa %xmm1,%xmm6 # qhasm: xmm7 = xmm3 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm0,%xmm7 # qhasm: xmm2 |= xmm1 # asm 1: por xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm11,%xmm2 # qhasm: xmm4 ^= xmm8 # asm 1: pxor xmm5=int6464#3 # asm 2: movdqa xmm5=%xmm2 movdqa %xmm15,%xmm2 # qhasm: xmm5 ^= xmm9 # asm 1: pxor xmm4=int6464#4 # asm 2: movdqa xmm4=%xmm3 movdqa %xmm13,%xmm3 # qhasm: xmm1 = xmm5 # asm 1: movdqa xmm1=int6464#5 # asm 2: movdqa xmm1=%xmm4 movdqa %xmm2,%xmm4 # qhasm: xmm4 ^= xmm14 # asm 1: pxor 
xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm10,%xmm2 # qhasm: xmm5 = xmm12 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm12,%xmm3 # qhasm: xmm6 = xmm9 # asm 1: movdqa xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm9,%xmm5 # qhasm: xmm7 = xmm15 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm15,%xmm7 # qhasm: xmm4 &= xmm11 # asm 1: pand xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm0,%xmm2 # qhasm: xmm4 ^= xmm2 # asm 1: pxor xmm6=int6464#4 # asm 2: movdqa xmm6=%xmm3 movdqa %xmm6,%xmm3 # qhasm: xmm6 ^= xmm3 # asm 1: pxor xmm7=int6464#6 # asm 2: movdqa xmm7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: xmm7 &= xmm6 # asm 1: pand xmm5=int6464#8 # asm 2: movdqa xmm5=%xmm7 movdqa %xmm4,%xmm7 # qhasm: xmm5 ^= xmm0 # asm 1: pxor xmm2=int6464#1 # asm 2: movdqa xmm2=%xmm0 movdqa %xmm3,%xmm0 # qhasm: xmm2 ^= xmm5 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm14,%xmm0 # qhasm: xmm0 = xmm13 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm13,%xmm1 # qhasm: xmm2 = xmm7 # asm 1: movdqa xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm2 ^= xmm6 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm0 = xmm9 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm4 ^= xmm12 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x93,xmm0=%xmm0 pshufd $0x93,%xmm8,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm9 by 0x93 # asm 1: pshufd $0x93,xmm1=int6464#2 # asm 2: pshufd $0x93,xmm1=%xmm1 pshufd $0x93,%xmm9,%xmm1 # qhasm: xmm2 = shuffle dwords of xmm12 by 0x93 # asm 1: pshufd $0x93,xmm2=int6464#3 # asm 2: pshufd $0x93,xmm2=%xmm2 pshufd $0x93,%xmm12,%xmm2 # qhasm: xmm3 = shuffle dwords of xmm14 by 0x93 # asm 1: pshufd $0x93,xmm3=int6464#4 # asm 2: pshufd $0x93,xmm3=%xmm3 pshufd $0x93,%xmm14,%xmm3 # qhasm: xmm4 = shuffle dwords of xmm11 by 0x93 # asm 1: pshufd $0x93,xmm4=int6464#5 # asm 2: pshufd $0x93,xmm4=%xmm4 pshufd $0x93,%xmm11,%xmm4 # qhasm: xmm5 = shuffle dwords of xmm15 by 0x93 # asm 1: pshufd $0x93,xmm5=int6464#6 # asm 2: pshufd $0x93,xmm5=%xmm5 pshufd $0x93,%xmm15,%xmm5 # qhasm: xmm6 = shuffle dwords of xmm10 by 0x93 # asm 1: pshufd $0x93,xmm6=int6464#7 # asm 2: pshufd $0x93,xmm6=%xmm6 pshufd $0x93,%xmm10,%xmm6 # qhasm: xmm7 = shuffle dwords of xmm13 by 0x93 # asm 1: pshufd $0x93,xmm7=int6464#8 # asm 2: pshufd $0x93,xmm7=%xmm7 pshufd $0x93,%xmm13,%xmm7 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x4E,xmm8=%xmm8 pshufd $0x4E,%xmm8,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E # asm 1: pshufd $0x4E,xmm9=int6464#10 # asm 2: pshufd $0x4E,xmm9=%xmm9 pshufd $0x4E,%xmm9,%xmm9 # qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E # asm 1: pshufd $0x4E,xmm12=int6464#13 # asm 2: pshufd $0x4E,xmm12=%xmm12 pshufd $0x4E,%xmm12,%xmm12 # qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E # asm 1: pshufd $0x4E,xmm14=int6464#15 # asm 2: pshufd $0x4E,xmm14=%xmm14 pshufd $0x4E,%xmm14,%xmm14 # qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E # asm 1: 
pshufd $0x4E,xmm11=int6464#12 # asm 2: pshufd $0x4E,xmm11=%xmm11 pshufd $0x4E,%xmm11,%xmm11 # qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E # asm 1: pshufd $0x4E,xmm15=int6464#16 # asm 2: pshufd $0x4E,xmm15=%xmm15 pshufd $0x4E,%xmm15,%xmm15 # qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E # asm 1: pshufd $0x4E,xmm10=int6464#11 # asm 2: pshufd $0x4E,xmm10=%xmm10 pshufd $0x4E,%xmm10,%xmm10 # qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E # asm 1: pshufd $0x4E,xmm13=int6464#14 # asm 2: pshufd $0x4E,xmm13=%xmm13 pshufd $0x4E,%xmm13,%xmm13 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm11=int6464#9 # asm 2: movdqa xmm11=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm10 = xmm1 # asm 1: movdqa xmm10=int6464#10 # asm 2: movdqa xmm10=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm9 = xmm5 # asm 1: movdqa xmm9=int6464#11 # asm 2: movdqa xmm9=%xmm10 movdqa %xmm5,%xmm10 # qhasm: xmm13 = xmm2 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm2,%xmm11 # qhasm: xmm12 = xmm6 # asm 1: movdqa xmm12=int6464#13 # asm 2: movdqa xmm12=%xmm12 movdqa %xmm6,%xmm12 # qhasm: xmm11 ^= xmm4 # asm 1: pxor xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm8,%xmm13 # qhasm: xmm8 = xmm10 # asm 1: movdqa xmm8=int6464#15 # asm 2: movdqa xmm8=%xmm14 movdqa %xmm9,%xmm14 # qhasm: xmm15 = xmm11 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm8,%xmm15 # qhasm: xmm10 |= xmm9 # asm 1: por xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm3,%xmm10 # qhasm: xmm12 ^= xmm0 # asm 1: pxor xmm13=int6464#11 # asm 2: movdqa xmm13=%xmm10 movdqa %xmm7,%xmm10 # qhasm: xmm13 ^= xmm1 # asm 1: pxor xmm12=int6464#12 # asm 2: movdqa xmm12=%xmm11 movdqa %xmm5,%xmm11 # qhasm: xmm9 = xmm13 # asm 1: movdqa xmm9=int6464#13 # asm 2: movdqa xmm9=%xmm12 movdqa %xmm10,%xmm12 # qhasm: xmm12 ^= xmm6 # asm 1: pxor xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm2,%xmm10 # qhasm: xmm13 = xmm4 # asm 1: movdqa xmm13=int6464#12 # asm 2: movdqa xmm13=%xmm11 movdqa %xmm4,%xmm11 # qhasm: xmm14 = xmm1 # asm 1: movdqa xmm14=int6464#14 # asm 2: movdqa xmm14=%xmm13 movdqa %xmm1,%xmm13 # qhasm: xmm15 = xmm7 # asm 1: movdqa xmm15=int6464#16 # asm 2: movdqa xmm15=%xmm15 movdqa %xmm7,%xmm15 # qhasm: xmm12 &= xmm3 # asm 1: pand xmm12=int6464#11 # asm 2: movdqa xmm12=%xmm10 movdqa %xmm8,%xmm10 # qhasm: xmm12 ^= xmm10 # asm 1: pxor xmm14=int6464#12 # asm 2: movdqa xmm14=%xmm11 movdqa %xmm14,%xmm11 # qhasm: xmm14 ^= xmm11 # asm 1: pxor xmm15=int6464#14 # asm 2: movdqa xmm15=%xmm13 movdqa %xmm10,%xmm13 # qhasm: xmm15 &= xmm14 # asm 1: pand xmm13=int6464#16 # asm 2: movdqa xmm13=%xmm15 movdqa %xmm12,%xmm15 # qhasm: xmm13 ^= xmm8 # asm 1: pxor xmm10=int6464#9 # asm 2: movdqa xmm10=%xmm8 movdqa %xmm11,%xmm8 # qhasm: xmm10 ^= xmm13 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm6,%xmm8 # qhasm: xmm8 = xmm5 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm5,%xmm9 # qhasm: xmm10 = xmm15 # asm 1: movdqa xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm10 ^= xmm14 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm12=int6464#9 # asm 2: movdqa xmm12=%xmm8 movdqa %xmm7,%xmm8 # qhasm: xmm8 = xmm1 # asm 1: movdqa xmm8=int6464#10 # asm 2: movdqa xmm8=%xmm9 movdqa %xmm1,%xmm9 # qhasm: xmm12 ^= xmm4 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # 
asm 1: pxor xmm10=int6464#11 # asm 2: movdqa xmm10=%xmm10 movdqa %xmm15,%xmm10 # qhasm: xmm10 ^= xmm9 # asm 1: pxor xmm11=int6464#11 # asm 2: movdqa xmm11=%xmm10 movdqa %xmm13,%xmm10 # qhasm: xmm11 ^= xmm14 # asm 1: pxor xmm8=int6464#9 # asm 2: pshufd $0x93,xmm8=%xmm8 pshufd $0x93,%xmm0,%xmm8 # qhasm: xmm9 = shuffle dwords of xmm1 by 0x93 # asm 1: pshufd $0x93,xmm9=int6464#10 # asm 2: pshufd $0x93,xmm9=%xmm9 pshufd $0x93,%xmm1,%xmm9 # qhasm: xmm10 = shuffle dwords of xmm4 by 0x93 # asm 1: pshufd $0x93,xmm10=int6464#11 # asm 2: pshufd $0x93,xmm10=%xmm10 pshufd $0x93,%xmm4,%xmm10 # qhasm: xmm11 = shuffle dwords of xmm6 by 0x93 # asm 1: pshufd $0x93,xmm11=int6464#12 # asm 2: pshufd $0x93,xmm11=%xmm11 pshufd $0x93,%xmm6,%xmm11 # qhasm: xmm12 = shuffle dwords of xmm3 by 0x93 # asm 1: pshufd $0x93,xmm12=int6464#13 # asm 2: pshufd $0x93,xmm12=%xmm12 pshufd $0x93,%xmm3,%xmm12 # qhasm: xmm13 = shuffle dwords of xmm7 by 0x93 # asm 1: pshufd $0x93,xmm13=int6464#14 # asm 2: pshufd $0x93,xmm13=%xmm13 pshufd $0x93,%xmm7,%xmm13 # qhasm: xmm14 = shuffle dwords of xmm2 by 0x93 # asm 1: pshufd $0x93,xmm14=int6464#15 # asm 2: pshufd $0x93,xmm14=%xmm14 pshufd $0x93,%xmm2,%xmm14 # qhasm: xmm15 = shuffle dwords of xmm5 by 0x93 # asm 1: pshufd $0x93,xmm15=int6464#16 # asm 2: pshufd $0x93,xmm15=%xmm15 pshufd $0x93,%xmm5,%xmm15 # qhasm: xmm0 ^= xmm8 # asm 1: pxor xmm0=int6464#1 # asm 2: pshufd $0x4E,xmm0=%xmm0 pshufd $0x4E,%xmm0,%xmm0 # qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E # asm 1: pshufd $0x4E,xmm1=int6464#2 # asm 2: pshufd $0x4E,xmm1=%xmm1 pshufd $0x4E,%xmm1,%xmm1 # qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E # asm 1: pshufd $0x4E,xmm4=int6464#5 # asm 2: pshufd $0x4E,xmm4=%xmm4 pshufd $0x4E,%xmm4,%xmm4 # qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E # asm 1: pshufd $0x4E,xmm6=int6464#7 # asm 2: pshufd $0x4E,xmm6=%xmm6 pshufd $0x4E,%xmm6,%xmm6 # qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E # asm 1: pshufd $0x4E,xmm3=int6464#4 # asm 2: pshufd $0x4E,xmm3=%xmm3 pshufd $0x4E,%xmm3,%xmm3 # qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E # asm 1: pshufd $0x4E,xmm7=int6464#8 # asm 2: pshufd $0x4E,xmm7=%xmm7 pshufd $0x4E,%xmm7,%xmm7 # qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E # asm 1: pshufd $0x4E,xmm2=int6464#3 # asm 2: pshufd $0x4E,xmm2=%xmm2 pshufd $0x4E,%xmm2,%xmm2 # qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E # asm 1: pshufd $0x4E,xmm5=int6464#6 # asm 2: pshufd $0x4E,xmm5=%xmm5 pshufd $0x4E,%xmm5,%xmm5 # qhasm: xmm8 ^= xmm0 # asm 1: pxor xmm3=int6464#1 # asm 2: movdqa xmm3=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm2 = xmm9 # asm 1: movdqa xmm2=int6464#2 # asm 2: movdqa xmm2=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm1 = xmm13 # asm 1: movdqa xmm1=int6464#3 # asm 2: movdqa xmm1=%xmm2 movdqa %xmm13,%xmm2 # qhasm: xmm5 = xmm10 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm10,%xmm3 # qhasm: xmm4 = xmm14 # asm 1: movdqa xmm4=int6464#5 # asm 2: movdqa xmm4=%xmm4 movdqa %xmm14,%xmm4 # qhasm: xmm3 ^= xmm12 # asm 1: pxor xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm0,%xmm5 # qhasm: xmm0 = xmm2 # asm 1: movdqa xmm0=int6464#7 # asm 2: movdqa xmm0=%xmm6 movdqa %xmm1,%xmm6 # qhasm: xmm7 = xmm3 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm0,%xmm7 # qhasm: xmm2 |= xmm1 # asm 1: por xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm11,%xmm2 # qhasm: xmm4 ^= xmm8 # asm 1: pxor xmm5=int6464#3 # asm 2: movdqa xmm5=%xmm2 movdqa %xmm15,%xmm2 # qhasm: xmm5 ^= xmm9 # asm 1: pxor xmm4=int6464#4 # asm 2: movdqa xmm4=%xmm3 movdqa %xmm13,%xmm3 # qhasm: xmm1 = xmm5 # asm 1: movdqa 
xmm1=int6464#5 # asm 2: movdqa xmm1=%xmm4 movdqa %xmm2,%xmm4 # qhasm: xmm4 ^= xmm14 # asm 1: pxor xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm10,%xmm2 # qhasm: xmm5 = xmm12 # asm 1: movdqa xmm5=int6464#4 # asm 2: movdqa xmm5=%xmm3 movdqa %xmm12,%xmm3 # qhasm: xmm6 = xmm9 # asm 1: movdqa xmm6=int6464#6 # asm 2: movdqa xmm6=%xmm5 movdqa %xmm9,%xmm5 # qhasm: xmm7 = xmm15 # asm 1: movdqa xmm7=int6464#8 # asm 2: movdqa xmm7=%xmm7 movdqa %xmm15,%xmm7 # qhasm: xmm4 &= xmm11 # asm 1: pand xmm4=int6464#3 # asm 2: movdqa xmm4=%xmm2 movdqa %xmm0,%xmm2 # qhasm: xmm4 ^= xmm2 # asm 1: pxor xmm6=int6464#4 # asm 2: movdqa xmm6=%xmm3 movdqa %xmm6,%xmm3 # qhasm: xmm6 ^= xmm3 # asm 1: pxor xmm7=int6464#6 # asm 2: movdqa xmm7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: xmm7 &= xmm6 # asm 1: pand xmm5=int6464#8 # asm 2: movdqa xmm5=%xmm7 movdqa %xmm4,%xmm7 # qhasm: xmm5 ^= xmm0 # asm 1: pxor xmm2=int6464#1 # asm 2: movdqa xmm2=%xmm0 movdqa %xmm3,%xmm0 # qhasm: xmm2 ^= xmm5 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm14,%xmm0 # qhasm: xmm0 = xmm13 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm13,%xmm1 # qhasm: xmm2 = xmm7 # asm 1: movdqa xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm2 ^= xmm6 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm4=int6464#1 # asm 2: movdqa xmm4=%xmm0 movdqa %xmm15,%xmm0 # qhasm: xmm0 = xmm9 # asm 1: movdqa xmm0=int6464#2 # asm 2: movdqa xmm0=%xmm1 movdqa %xmm9,%xmm1 # qhasm: xmm4 ^= xmm12 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm2=int6464#3 # asm 2: movdqa xmm2=%xmm2 movdqa %xmm7,%xmm2 # qhasm: xmm2 ^= xmm1 # asm 1: pxor xmm3=int6464#3 # asm 2: movdqa xmm3=%xmm2 movdqa %xmm5,%xmm2 # qhasm: xmm3 ^= xmm6 # asm 1: pxor xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm10,%xmm0 # qhasm: uint6464 xmm0 >>= 1 # asm 1: psrlq $1,xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm11,%xmm0 # qhasm: uint6464 xmm0 >>= 1 # asm 1: psrlq $1,xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm12,%xmm0 # qhasm: uint6464 xmm0 >>= 1 # asm 1: psrlq $1,xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm8,%xmm0 # qhasm: uint6464 xmm0 >>= 1 # asm 1: psrlq $1,xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm15,%xmm0 # qhasm: uint6464 xmm0 >>= 2 # asm 1: psrlq $2,xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm11,%xmm0 # qhasm: uint6464 xmm0 >>= 2 # asm 1: psrlq $2,xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm9,%xmm0 # qhasm: uint6464 xmm0 >>= 2 # asm 1: psrlq $2,xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm8,%xmm0 # qhasm: uint6464 xmm0 >>= 2 # asm 1: psrlq $2,xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm14,%xmm0 # qhasm: uint6464 xmm0 >>= 4 # asm 1: psrlq $4,xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm12,%xmm0 # qhasm: uint6464 xmm0 >>= 4 # asm 1: psrlq $4,xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm9,%xmm0 # qhasm: uint6464 xmm0 >>= 4 # asm 1: psrlq $4,xmm0=int6464#1 # asm 2: movdqa xmm0=%xmm0 movdqa %xmm8,%xmm0 # qhasm: uint6464 xmm0 >>= 4 # asm 1: psrlq $4,tmp=int64#6d # asm 2: movl 12(tmp=%r9d movl 12(%rcx),%r9d # qhasm: (uint32) bswap tmp # asm 1: bswap lensav=int64#5 # asm 2: mov lensav=%r8 mov %rdx,%r8 # qhasm: (uint32) len >>= 4 # asm 1: shr $4,tmp=int64#6d # asm 2: movl 12(tmp=%r9d movl 12(%rcx),%r9d # qhasm: (uint32) bswap tmp # asm 1: 
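For orientation: what both the lost assembly and the portable C code compute is plain AES-128 in counter mode. The 16-byte nonce block doubles as the counter, its last four bytes are treated as a big-endian integer, eight blocks are produced per pass, and a final partial block is copied out byte by byte. The sketch below restates that framing one block at a time in portable C. It is illustrative only and not part of this source tree: the aes128_encrypt_block() helper it assumes does not exist here (the real code uses the bitsliced round macros instead), and the byte-order helpers simply mirror the ones defined in portable/common.c further down.

/*
 * Illustrative sketch only (not part of CurveDNS or NaCl): single-block CTR
 * keystream generation using the same conventions as the surrounding code.
 * aes128_encrypt_block() is an assumed primitive, not provided here.
 */
#include <string.h>

typedef unsigned int uint32;   /* assumes a 32-bit int, as the real code does via types.h */

/* same semantics as the helpers in portable/common.c */
static uint32 load32_bigendian(const unsigned char *x)
{
  return (uint32) x[3] | ((uint32) x[2] << 8) | ((uint32) x[1] << 16) | ((uint32) x[0] << 24);
}
static void store32_bigendian(unsigned char *x, uint32 u)
{
  x[3] = u; x[2] = u >> 8; x[1] = u >> 16; x[0] = u >> 24;
}

/* assumed primitive: encrypt one 16-byte block under an expanded AES-128 key */
void aes128_encrypt_block(unsigned char out[16], const unsigned char in[16],
                          const unsigned char *expanded_key);

void ctr_keystream(unsigned char *out, unsigned long long outlen,
                   unsigned char nonce[16], const unsigned char *expanded_key)
{
  unsigned char block[16];
  while (outlen > 0) {
    unsigned long long n = outlen < 16 ? outlen : 16;
    aes128_encrypt_block(block, nonce, expanded_key);
    memcpy(out, block, n);            /* a partial last block is copied byte-wise,
                                         like the "bytes:" loop in the tail below */
    out += n;
    outlen -= n;
    /* big-endian increment of the counter kept in nonce[12..15] */
    store32_bigendian(nonce + 12, load32_bigendian(nonce + 12) + 1);
  }
}

The real implementation batches eight counter blocks per pass, which is why the surviving tail below adds 8 (or the number of leftover 16-byte blocks) to the counter instead of 1.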
  tmp = load32_bigendian(np + 12);
  tmp += len;
  store32_bigendian(np + 12, tmp);

  blp = bl;
  *(int128 *)(blp + 0) = xmm8;
  *(int128 *)(blp + 16) = xmm9;
  *(int128 *)(blp + 32) = xmm12;
  *(int128 *)(blp + 48) = xmm14;
  *(int128 *)(blp + 64) = xmm11;
  *(int128 *)(blp + 80) = xmm15;
  *(int128 *)(blp + 96) = xmm10;
  *(int128 *)(blp + 112) = xmm13;

  bytes:

  if(lensav == 0) goto end;

  b = blp[0];
  *(unsigned char *)(outp + 0) = b;
  blp += 1;
  outp +=1;
  lensav -= 1;

  goto bytes;

  full:

  tmp = load32_bigendian(np + 12);
  tmp += 8;
  store32_bigendian(np + 12, tmp);

  *(int128 *) (outp + 0) = xmm8;
  *(int128 *) (outp + 16) = xmm9;
  *(int128 *) (outp + 32) = xmm12;
  *(int128 *) (outp + 48) = xmm14;
  *(int128 *) (outp + 64) = xmm11;
  *(int128 *) (outp + 80) = xmm15;
  *(int128 *) (outp + 96) = xmm10;
  *(int128 *) (outp + 112) = xmm13;

  end:
  return 0;
}
curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/portable/api.h000066400000000000000000000001321150631715100252020ustar00rootroot00000000000000
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NONCEBYTES 16
#define CRYPTO_BEFORENMBYTES 1408
curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/portable/beforenm.c000066400000000000000000000041141150631715100262250ustar00rootroot00000000000000
/* Author: Peter Schwabe, ported from an assembly implementation by Emilia Käsper
 * Date: 2009-03-19
 * Public domain
 */

#include "consts.h"
#include "int128.h"
#include "common.h"
#include "crypto_stream.h"

int crypto_stream_beforenm(unsigned char *c, const unsigned char *k)
{

  /*
  int64 x0;
  int64 x1;
  int64 x2;
  int64 x3;
  int64 e;
  int64 q0;
  int64 q1;
  int64 q2;
  int64 q3;
  */

  int128 xmm0;
  int128 xmm1;
  int128 xmm2;
  int128 xmm3;
  int128 xmm4;
  int128 xmm5;
  int128 xmm6;
  int128 xmm7;
  int128 xmm8;
  int128 xmm9;
  int128 xmm10;
  int128 xmm11;
  int128 xmm12;
  int128 xmm13;
  int128 xmm14;
  int128 xmm15;

  int128 t;

  bitslicekey0(k, c)

  keyexpbs1(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)

  keyexpbs(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm1);, 2,c)
  keyexpbs(xmm0, xmm1, xmm3, xmm2, xmm6, xmm5, xmm4, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm6);, 3,c)
  keyexpbs(xmm0, xmm1, xmm6, xmm4, xmm2, xmm7, xmm3, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);, 4,c)
  keyexpbs(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);, 5,c)
  keyexpbs(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm5);, 6,c)
  keyexpbs(xmm0, xmm1, xmm3, xmm2, xmm6, xmm5, xmm4, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);, 7,c)
  keyexpbs(xmm0, xmm1, xmm6, xmm4, xmm2, xmm7, xmm3, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm7);, 8,c)
  keyexpbs(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm0); xor_rcon(&xmm1); xor_rcon(&xmm6); xor_rcon(&xmm3);, 9,c)
  keyexpbs10(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)

  return 0;
}
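The three constants in api.h above fix the externally visible sizes of this primitive: a 16-byte AES key, a 16-byte nonce/counter block, and a 1408-byte buffer for the bitsliced key schedule that crypto_stream_beforenm() writes. A minimal, hedged usage sketch of that beforenm/afternm split follows. It is written against the generic names used inside this directory; an installed NaCl build exports renamed symbols (such as crypto_stream_aes128ctr_beforenm), and the wrapper function here is purely illustrative.

/*
 * Illustrative sketch only: expand the key once, then generate keystream for
 * as many nonces as needed.  Declarations follow the generic names used in
 * this directory (api.h, beforenm.c, afternm.c); symbol names in an actual
 * NaCl build differ.
 */
#define CRYPTO_KEYBYTES      16
#define CRYPTO_NONCEBYTES    16
#define CRYPTO_BEFORENMBYTES 1408

/* implemented by beforenm.c and afternm.c in this directory */
int crypto_stream_beforenm(unsigned char *c, const unsigned char *k);
int crypto_stream_afternm(unsigned char *out, unsigned long long outlen,
                          const unsigned char *n, const unsigned char *c);

int make_keystream(unsigned char *out, unsigned long long outlen,
                   const unsigned char n[CRYPTO_NONCEBYTES],
                   const unsigned char k[CRYPTO_KEYBYTES])
{
  unsigned char kexp[CRYPTO_BEFORENMBYTES];          /* bitsliced round keys */
  if (crypto_stream_beforenm(kexp, k) != 0)          /* one-time key expansion */
    return -1;
  return crypto_stream_afternm(out, outlen, n, kexp);  /* per-nonce keystream */
}

Splitting the key expansion out this way is what makes the 1408-byte CRYPTO_BEFORENMBYTES buffer worthwhile: the expensive bitslicing of the round keys is paid once per key rather than once per packet.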
curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/portable/common.c000066400000000000000000000022301150631715100257150ustar00rootroot00000000000000#include "common.h" uint32 load32_bigendian(const unsigned char *x) { return (uint32) (x[3]) \ | (((uint32) (x[2])) << 8) \ | (((uint32) (x[1])) << 16) \ | (((uint32) (x[0])) << 24) ; } void store32_bigendian(unsigned char *x,uint32 u) { x[3] = u; u >>= 8; x[2] = u; u >>= 8; x[1] = u; u >>= 8; x[0] = u; } uint32 load32_littleendian(const unsigned char *x) { return (uint32) (x[0]) \ | (((uint32) (x[1])) << 8) \ | (((uint32) (x[2])) << 16) \ | (((uint32) (x[3])) << 24) ; } void store32_littleendian(unsigned char *x,uint32 u) { x[0] = u; u >>= 8; x[1] = u; u >>= 8; x[2] = u; u >>= 8; x[3] = u; } uint64 load64_littleendian(const unsigned char *x) { return (uint64) (x[0]) \ | (((uint64) (x[1])) << 8) \ | (((uint64) (x[2])) << 16) \ | (((uint64) (x[3])) << 24) | (((uint64) (x[4])) << 32) | (((uint64) (x[5])) << 40) | (((uint64) (x[6])) << 48) | (((uint64) (x[7])) << 56) ; } void store64_littleendian(unsigned char *x,uint64 u) { x[0] = u; u >>= 8; x[1] = u; u >>= 8; x[2] = u; u >>= 8; x[3] = u; u >>= 8; x[4] = u; u >>= 8; x[5] = u; u >>= 8; x[6] = u; u >>= 8; x[7] = u; } curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/portable/common.h000066400000000000000000000451571150631715100257410ustar00rootroot00000000000000/* Author: Peter Schwabe, ported from an assembly implementation by Emilia Käsper Date: 2009-03-19 Public domain */ #ifndef COMMON_H #define COMMON_H #include "types.h" #define load32_bigendian crypto_stream_aes128ctr_portable_load32_bigendian uint32 load32_bigendian(const unsigned char *x); #define store32_bigendian crypto_stream_aes128ctr_portable_store32_bigendian void store32_bigendian(unsigned char *x,uint32 u); #define load32_littleendian crypto_stream_aes128ctr_portable_load32_littleendian uint32 load32_littleendian(const unsigned char *x); #define store32_littleendian crypto_stream_aes128ctr_portable_store32_littleendian void store32_littleendian(unsigned char *x,uint32 u); #define load64_littleendian crypto_stream_aes128ctr_portable_load64_littleendian uint64 load64_littleendian(const unsigned char *x); #define store64_littleendian crypto_stream_aes128ctr_portable_store64_littleendian void store64_littleendian(unsigned char *x,uint64 u); /* Macros required only for key expansion */ #define keyexpbs1(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) \ rotbyte(&b0);\ rotbyte(&b1);\ rotbyte(&b2);\ rotbyte(&b3);\ rotbyte(&b4);\ rotbyte(&b5);\ rotbyte(&b6);\ rotbyte(&b7);\ ;\ sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\ ;\ xor_rcon(&b0);\ shufb(&b0, EXPB0);\ shufb(&b1, EXPB0);\ shufb(&b4, EXPB0);\ shufb(&b6, EXPB0);\ shufb(&b3, EXPB0);\ shufb(&b7, EXPB0);\ shufb(&b2, EXPB0);\ shufb(&b5, EXPB0);\ shufb(&b0, EXPB0);\ ;\ t0 = *(int128 *)(bskey + 0);\ t1 = *(int128 *)(bskey + 16);\ t2 = *(int128 *)(bskey + 32);\ t3 = *(int128 *)(bskey + 48);\ t4 = *(int128 *)(bskey + 64);\ t5 = *(int128 *)(bskey + 80);\ t6 = *(int128 *)(bskey + 96);\ t7 = *(int128 *)(bskey + 112);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ rshift32_littleendian(&t0, 8);\ rshift32_littleendian(&t1, 8);\ rshift32_littleendian(&t2, 8);\ rshift32_littleendian(&t3, 8);\ rshift32_littleendian(&t4, 8);\ rshift32_littleendian(&t5, 8);\ rshift32_littleendian(&t6, 8);\ rshift32_littleendian(&t7, 8);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ 
xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ rshift32_littleendian(&t0, 8);\ rshift32_littleendian(&t1, 8);\ rshift32_littleendian(&t2, 8);\ rshift32_littleendian(&t3, 8);\ rshift32_littleendian(&t4, 8);\ rshift32_littleendian(&t5, 8);\ rshift32_littleendian(&t6, 8);\ rshift32_littleendian(&t7, 8);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ rshift32_littleendian(&t0, 8);\ rshift32_littleendian(&t1, 8);\ rshift32_littleendian(&t2, 8);\ rshift32_littleendian(&t3, 8);\ rshift32_littleendian(&t4, 8);\ rshift32_littleendian(&t5, 8);\ rshift32_littleendian(&t6, 8);\ rshift32_littleendian(&t7, 8);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ *(int128 *)(bskey + 128) = b0;\ *(int128 *)(bskey + 144) = b1;\ *(int128 *)(bskey + 160) = b4;\ *(int128 *)(bskey + 176) = b6;\ *(int128 *)(bskey + 192) = b3;\ *(int128 *)(bskey + 208) = b7;\ *(int128 *)(bskey + 224) = b2;\ *(int128 *)(bskey + 240) = b5;\ #define keyexpbs10(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) ;\ toggle(&b0);\ toggle(&b1);\ toggle(&b5);\ toggle(&b6);\ rotbyte(&b0);\ rotbyte(&b1);\ rotbyte(&b2);\ rotbyte(&b3);\ rotbyte(&b4);\ rotbyte(&b5);\ rotbyte(&b6);\ rotbyte(&b7);\ ;\ sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\ ;\ xor_rcon(&b1);\ xor_rcon(&b4);\ xor_rcon(&b3);\ xor_rcon(&b7);\ shufb(&b0, EXPB0);\ shufb(&b1, EXPB0);\ shufb(&b4, EXPB0);\ shufb(&b6, EXPB0);\ shufb(&b3, EXPB0);\ shufb(&b7, EXPB0);\ shufb(&b2, EXPB0);\ shufb(&b5, EXPB0);\ ;\ t0 = *(int128 *)(bskey + 9 * 128 + 0);\ t1 = *(int128 *)(bskey + 9 * 128 + 16);\ t2 = *(int128 *)(bskey + 9 * 128 + 32);\ t3 = *(int128 *)(bskey + 9 * 128 + 48);\ t4 = *(int128 *)(bskey + 9 * 128 + 64);\ t5 = *(int128 *)(bskey + 9 * 128 + 80);\ t6 = *(int128 *)(bskey + 9 * 128 + 96);\ t7 = *(int128 *)(bskey + 9 * 128 + 112);\ ;\ toggle(&t0);\ toggle(&t1);\ toggle(&t5);\ toggle(&t6);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ rshift32_littleendian(&t0, 8);\ rshift32_littleendian(&t1, 8);\ rshift32_littleendian(&t2, 8);\ rshift32_littleendian(&t3, 8);\ rshift32_littleendian(&t4, 8);\ rshift32_littleendian(&t5, 8);\ rshift32_littleendian(&t6, 8);\ rshift32_littleendian(&t7, 8);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ rshift32_littleendian(&t0, 8);\ rshift32_littleendian(&t1, 8);\ rshift32_littleendian(&t2, 8);\ rshift32_littleendian(&t3, 8);\ rshift32_littleendian(&t4, 8);\ rshift32_littleendian(&t5, 8);\ rshift32_littleendian(&t6, 8);\ rshift32_littleendian(&t7, 8);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ rshift32_littleendian(&t0, 8);\ rshift32_littleendian(&t1, 8);\ rshift32_littleendian(&t2, 8);\ rshift32_littleendian(&t3, 8);\ rshift32_littleendian(&t4, 8);\ rshift32_littleendian(&t5, 8);\ rshift32_littleendian(&t6, 8);\ rshift32_littleendian(&t7, 8);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ shufb(&b0, M0);\ shufb(&b1, M0);\ shufb(&b2, M0);\ shufb(&b3, M0);\ shufb(&b4, M0);\ shufb(&b5, M0);\ 
shufb(&b6, M0);\ shufb(&b7, M0);\ ;\ *(int128 *)(bskey + 1280) = b0;\ *(int128 *)(bskey + 1296) = b1;\ *(int128 *)(bskey + 1312) = b4;\ *(int128 *)(bskey + 1328) = b6;\ *(int128 *)(bskey + 1344) = b3;\ *(int128 *)(bskey + 1360) = b7;\ *(int128 *)(bskey + 1376) = b2;\ *(int128 *)(bskey + 1392) = b5;\ #define keyexpbs(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, rcon, i, bskey) \ toggle(&b0);\ toggle(&b1);\ toggle(&b5);\ toggle(&b6);\ rotbyte(&b0);\ rotbyte(&b1);\ rotbyte(&b2);\ rotbyte(&b3);\ rotbyte(&b4);\ rotbyte(&b5);\ rotbyte(&b6);\ rotbyte(&b7);\ ;\ sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\ ;\ rcon;\ shufb(&b0, EXPB0);\ shufb(&b1, EXPB0);\ shufb(&b4, EXPB0);\ shufb(&b6, EXPB0);\ shufb(&b3, EXPB0);\ shufb(&b7, EXPB0);\ shufb(&b2, EXPB0);\ shufb(&b5, EXPB0);\ ;\ t0 = *(int128 *)(bskey + (i-1) * 128 + 0);\ t1 = *(int128 *)(bskey + (i-1) * 128 + 16);\ t2 = *(int128 *)(bskey + (i-1) * 128 + 32);\ t3 = *(int128 *)(bskey + (i-1) * 128 + 48);\ t4 = *(int128 *)(bskey + (i-1) * 128 + 64);\ t5 = *(int128 *)(bskey + (i-1) * 128 + 80);\ t6 = *(int128 *)(bskey + (i-1) * 128 + 96);\ t7 = *(int128 *)(bskey + (i-1) * 128 + 112);\ ;\ toggle(&t0);\ toggle(&t1);\ toggle(&t5);\ toggle(&t6);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ rshift32_littleendian(&t0, 8);\ rshift32_littleendian(&t1, 8);\ rshift32_littleendian(&t2, 8);\ rshift32_littleendian(&t3, 8);\ rshift32_littleendian(&t4, 8);\ rshift32_littleendian(&t5, 8);\ rshift32_littleendian(&t6, 8);\ rshift32_littleendian(&t7, 8);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ rshift32_littleendian(&t0, 8);\ rshift32_littleendian(&t1, 8);\ rshift32_littleendian(&t2, 8);\ rshift32_littleendian(&t3, 8);\ rshift32_littleendian(&t4, 8);\ rshift32_littleendian(&t5, 8);\ rshift32_littleendian(&t6, 8);\ rshift32_littleendian(&t7, 8);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ rshift32_littleendian(&t0, 8);\ rshift32_littleendian(&t1, 8);\ rshift32_littleendian(&t2, 8);\ rshift32_littleendian(&t3, 8);\ rshift32_littleendian(&t4, 8);\ rshift32_littleendian(&t5, 8);\ rshift32_littleendian(&t6, 8);\ rshift32_littleendian(&t7, 8);\ ;\ xor2(&b0, &t0);\ xor2(&b1, &t1);\ xor2(&b4, &t2);\ xor2(&b6, &t3);\ xor2(&b3, &t4);\ xor2(&b7, &t5);\ xor2(&b2, &t6);\ xor2(&b5, &t7);\ ;\ *(int128 *)(bskey + i*128 + 0) = b0;\ *(int128 *)(bskey + i*128 + 16) = b1;\ *(int128 *)(bskey + i*128 + 32) = b4;\ *(int128 *)(bskey + i*128 + 48) = b6;\ *(int128 *)(bskey + i*128 + 64) = b3;\ *(int128 *)(bskey + i*128 + 80) = b7;\ *(int128 *)(bskey + i*128 + 96) = b2;\ *(int128 *)(bskey + i*128 + 112) = b5;\ /* Macros used in multiple contexts */ #define bitslicekey0(key, bskey) \ xmm0 = *(int128 *) (key + 0);\ shufb(&xmm0, M0);\ copy2(&xmm1, &xmm0);\ copy2(&xmm2, &xmm0);\ copy2(&xmm3, &xmm0);\ copy2(&xmm4, &xmm0);\ copy2(&xmm5, &xmm0);\ copy2(&xmm6, &xmm0);\ copy2(&xmm7, &xmm0);\ ;\ bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t);\ ;\ *(int128 *) (bskey + 0) = xmm0;\ *(int128 *) (bskey + 16) = xmm1;\ *(int128 *) (bskey + 32) = xmm2;\ *(int128 *) (bskey + 48) = xmm3;\ *(int128 *) (bskey + 64) = xmm4;\ *(int128 *) (bskey + 80) = xmm5;\ *(int128 *) (bskey + 96) = xmm6;\ *(int128 *) (bskey + 112) = xmm7;\ #define bitslicekey10(key, bskey) \ xmm0 = 
*(int128 *) (key + 0);\ copy2(xmm1, xmm0);\ copy2(xmm2, xmm0);\ copy2(xmm3, xmm0);\ copy2(xmm4, xmm0);\ copy2(xmm5, xmm0);\ copy2(xmm6, xmm0);\ copy2(xmm7, xmm0);\ ;\ bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t);\ ;\ toggle(&xmm6);\ toggle(&xmm5);\ toggle(&xmm1);\ toggle(&xmm0);\ ;\ *(int128 *) (bskey + 0 + 1280) = xmm0;\ *(int128 *) (bskey + 16 + 1280) = xmm1;\ *(int128 *) (bskey + 32 + 1280) = xmm2;\ *(int128 *) (bskey + 48 + 1280) = xmm3;\ *(int128 *) (bskey + 64 + 1280) = xmm4;\ *(int128 *) (bskey + 80 + 1280) = xmm5;\ *(int128 *) (bskey + 96 + 1280) = xmm6;\ *(int128 *) (bskey + 112 + 1280) = xmm7;\ #define bitslicekey(i,key,bskey) \ xmm0 = *(int128 *) (key + 0);\ shufb(&xmm0, M0);\ copy2(&xmm1, &xmm0);\ copy2(&xmm2, &xmm0);\ copy2(&xmm3, &xmm0);\ copy2(&xmm4, &xmm0);\ copy2(&xmm5, &xmm0);\ copy2(&xmm6, &xmm0);\ copy2(&xmm7, &xmm0);\ ;\ bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t);\ ;\ toggle(&xmm6);\ toggle(&xmm5);\ toggle(&xmm1);\ toggle(&xmm0);\ ;\ *(int128 *) (bskey + 0 + 128*i) = xmm0;\ *(int128 *) (bskey + 16 + 128*i) = xmm1;\ *(int128 *) (bskey + 32 + 128*i) = xmm2;\ *(int128 *) (bskey + 48 + 128*i) = xmm3;\ *(int128 *) (bskey + 64 + 128*i) = xmm4;\ *(int128 *) (bskey + 80 + 128*i) = xmm5;\ *(int128 *) (bskey + 96 + 128*i) = xmm6;\ *(int128 *) (bskey + 112 + 128*i) = xmm7;\ #define bitslice(x0, x1, x2, x3, x4, x5, x6, x7, t) \ swapmove(x0, x1, 1, BS0, t);\ swapmove(x2, x3, 1, BS0, t);\ swapmove(x4, x5, 1, BS0, t);\ swapmove(x6, x7, 1, BS0, t);\ ;\ swapmove(x0, x2, 2, BS1, t);\ swapmove(x1, x3, 2, BS1, t);\ swapmove(x4, x6, 2, BS1, t);\ swapmove(x5, x7, 2, BS1, t);\ ;\ swapmove(x0, x4, 4, BS2, t);\ swapmove(x1, x5, 4, BS2, t);\ swapmove(x2, x6, 4, BS2, t);\ swapmove(x3, x7, 4, BS2, t);\ #define swapmove(a, b, n, m, t) \ copy2(&t, &b);\ rshift64_littleendian(&t, n);\ xor2(&t, &a);\ and2(&t, &m);\ xor2(&a, &t);\ lshift64_littleendian(&t, n);\ xor2(&b, &t); #define rotbyte(x) \ shufb(x, ROTB) /* TODO: Make faster */ /* Macros used for encryption (and decryption) */ #define shiftrows(x0, x1, x2, x3, x4, x5, x6, x7, i, M, bskey) \ xor2(&x0, (int128 *)(bskey + 128*(i-1) + 0));\ shufb(&x0, M);\ xor2(&x1, (int128 *)(bskey + 128*(i-1) + 16));\ shufb(&x1, M);\ xor2(&x2, (int128 *)(bskey + 128*(i-1) + 32));\ shufb(&x2, M);\ xor2(&x3, (int128 *)(bskey + 128*(i-1) + 48));\ shufb(&x3, M);\ xor2(&x4, (int128 *)(bskey + 128*(i-1) + 64));\ shufb(&x4, M);\ xor2(&x5, (int128 *)(bskey + 128*(i-1) + 80));\ shufb(&x5, M);\ xor2(&x6, (int128 *)(bskey + 128*(i-1) + 96));\ shufb(&x6, M);\ xor2(&x7, (int128 *)(bskey + 128*(i-1) + 112));\ shufb(&x7, M);\ #define mixcolumns(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, t7) \ shufd(&t0, &x0, 0x93);\ shufd(&t1, &x1, 0x93);\ shufd(&t2, &x2, 0x93);\ shufd(&t3, &x3, 0x93);\ shufd(&t4, &x4, 0x93);\ shufd(&t5, &x5, 0x93);\ shufd(&t6, &x6, 0x93);\ shufd(&t7, &x7, 0x93);\ ;\ xor2(&x0, &t0);\ xor2(&x1, &t1);\ xor2(&x2, &t2);\ xor2(&x3, &t3);\ xor2(&x4, &t4);\ xor2(&x5, &t5);\ xor2(&x6, &t6);\ xor2(&x7, &t7);\ ;\ xor2(&t0, &x7);\ xor2(&t1, &x0);\ xor2(&t2, &x1);\ xor2(&t1, &x7);\ xor2(&t3, &x2);\ xor2(&t4, &x3);\ xor2(&t5, &x4);\ xor2(&t3, &x7);\ xor2(&t6, &x5);\ xor2(&t7, &x6);\ xor2(&t4, &x7);\ ;\ shufd(&x0, &x0, 0x4e);\ shufd(&x1, &x1, 0x4e);\ shufd(&x2, &x2, 0x4e);\ shufd(&x3, &x3, 0x4e);\ shufd(&x4, &x4, 0x4e);\ shufd(&x5, &x5, 0x4e);\ shufd(&x6, &x6, 0x4e);\ shufd(&x7, &x7, 0x4e);\ ;\ xor2(&t0, &x0);\ xor2(&t1, &x1);\ xor2(&t2, &x2);\ xor2(&t3, &x3);\ xor2(&t4, &x4);\ xor2(&t5, &x5);\ xor2(&t6, &x6);\ xor2(&t7, &x7);\ 
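/*
 * The two round macros that follow tie the building blocks above together:
 * aesround(i) folds in bitsliced round key i-1 and applies the SR byte
 * shuffle inside shiftrows, runs the shared sbox layer, and ends with
 * mixcolumns; lastround swaps in the SRM0 shuffle, drops mixcolumns, and
 * xors the final round key stored at offset 128*10 of the expanded key.
 */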
#define aesround(i, b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) \ shiftrows(b0, b1, b2, b3, b4, b5, b6, b7, i, SR, bskey);\ sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\ mixcolumns(b0, b1, b4, b6, b3, b7, b2, b5, t0, t1, t2, t3, t4, t5, t6, t7);\ #define lastround(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) \ shiftrows(b0, b1, b2, b3, b4, b5, b6, b7, 10, SRM0, bskey);\ sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\ xor2(&b0,(int128 *)(bskey + 128*10));\ xor2(&b1,(int128 *)(bskey + 128*10+16));\ xor2(&b4,(int128 *)(bskey + 128*10+32));\ xor2(&b6,(int128 *)(bskey + 128*10+48));\ xor2(&b3,(int128 *)(bskey + 128*10+64));\ xor2(&b7,(int128 *)(bskey + 128*10+80));\ xor2(&b2,(int128 *)(bskey + 128*10+96));\ xor2(&b5,(int128 *)(bskey + 128*10+112));\ #define sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, s0, s1, s2, s3) \ InBasisChange(b0, b1, b2, b3, b4, b5, b6, b7); \ Inv_GF256(b6, b5, b0, b3, b7, b1, b4, b2, t0, t1, t2, t3, s0, s1, s2, s3); \ OutBasisChange(b7, b1, b4, b2, b6, b5, b0, b3); \ #define InBasisChange(b0, b1, b2, b3, b4, b5, b6, b7) \ xor2(&b5, &b6);\ xor2(&b2, &b1);\ xor2(&b5, &b0);\ xor2(&b6, &b2);\ xor2(&b3, &b0);\ ;\ xor2(&b6, &b3);\ xor2(&b3, &b7);\ xor2(&b3, &b4);\ xor2(&b7, &b5);\ xor2(&b3, &b1);\ ;\ xor2(&b4, &b5);\ xor2(&b2, &b7);\ xor2(&b1, &b5);\ #define OutBasisChange(b0, b1, b2, b3, b4, b5, b6, b7) \ xor2(&b0, &b6);\ xor2(&b1, &b4);\ xor2(&b2, &b0);\ xor2(&b4, &b6);\ xor2(&b6, &b1);\ ;\ xor2(&b1, &b5);\ xor2(&b5, &b3);\ xor2(&b2, &b5);\ xor2(&b3, &b7);\ xor2(&b7, &b5);\ ;\ xor2(&b4, &b7);\ #define Mul_GF4(x0, x1, y0, y1, t0) \ copy2(&t0, &y0);\ xor2(&t0, &y1);\ and2(&t0, &x0);\ xor2(&x0, &x1);\ and2(&x0, &y1);\ and2(&x1, &y0);\ xor2(&x0, &x1);\ xor2(&x1, &t0);\ #define Mul_GF4_N(x0, x1, y0, y1, t0) \ copy2(&t0, &y0);\ xor2(&t0, &y1);\ and2(&t0, &x0);\ xor2(&x0, &x1);\ and2(&x0, &y1);\ and2(&x1, &y0);\ xor2(&x1, &x0);\ xor2(&x0, &t0);\ #define Mul_GF4_2(x0, x1, x2, x3, y0, y1, t0, t1) \ copy2(&t0, = y0);\ xor2(&t0, &y1);\ copy2(&t1, &t0);\ and2(&t0, &x0);\ and2(&t1, &x2);\ xor2(&x0, &x1);\ xor2(&x2, &x3);\ and2(&x0, &y1);\ and2(&x2, &y1);\ and2(&x1, &y0);\ and2(&x3, &y0);\ xor2(&x0, &x1);\ xor2(&x2, &x3);\ xor2(&x1, &t0);\ xor2(&x3, &t1);\ #define Mul_GF16(x0, x1, x2, x3, y0, y1, y2, y3, t0, t1, t2, t3) \ copy2(&t0, &x0);\ copy2(&t1, &x1);\ Mul_GF4(x0, x1, y0, y1, t2);\ xor2(&t0, &x2);\ xor2(&t1, &x3);\ xor2(&y0, &y2);\ xor2(&y1, &y3);\ Mul_GF4_N(t0, t1, y0, y1, t2);\ Mul_GF4(x2, x3, y2, y3, t3);\ ;\ xor2(&x0, &t0);\ xor2(&x2, &t0);\ xor2(&x1, &t1);\ xor2(&x3, &t1);\ #define Mul_GF16_2(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, t0, t1, t2, t3) \ copy2(&t0, &x0);\ copy2(&t1, &x1);\ Mul_GF4(x0, x1, y0, y1, t2);\ xor2(&t0, &x2);\ xor2(&t1, &x3);\ xor2(&y0, &y2);\ xor2(&y1, &y3);\ Mul_GF4_N(t0, t1, y0, y1, t3);\ Mul_GF4(x2, x3, y2, y3, t2);\ ;\ xor2(&x0, &t0);\ xor2(&x2, &t0);\ xor2(&x1, &t1);\ xor2(&x3, &t1);\ ;\ copy2(&t0, &x4);\ copy2(&t1, &x5);\ xor2(&t0, &x6);\ xor2(&t1, &x7);\ Mul_GF4_N(t0, t1, y0, y1, t3);\ Mul_GF4(x6, x7, y2, y3, t2);\ xor2(&y0, &y2);\ xor2(&y1, &y3);\ Mul_GF4(x4, x5, y0, y1, t3);\ ;\ xor2(&x4, &t0);\ xor2(&x6, &t0);\ xor2(&x5, &t1);\ xor2(&x7, &t1);\ #define Inv_GF16(x0, x1, x2, x3, t0, t1, t2, t3) \ copy2(&t0, &x1);\ copy2(&t1, &x0);\ and2(&t0, &x3);\ or2(&t1, &x2);\ copy2(&t2, &x1);\ copy2(&t3, &x0);\ or2(&t2, &x2);\ or2(&t3, &x3);\ xor2(&t2, &t3);\ ;\ xor2(&t0, &t2);\ xor2(&t1, &t2);\ ;\ Mul_GF4_2(x0, x1, x2, x3, t1, t0, t2, t3);\ 
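/*
 * Inv_GF256 below composes the GF(4)/GF(16) helpers defined above: it
 * builds the tower-field terms of the eight bitsliced inputs with
 * and/or/xor logic and finishes with Mul_GF16_2, giving the field
 * inversion that sbox sandwiches between InBasisChange and OutBasisChange.
 */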
#define Inv_GF256(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, s0, s1, s2, s3) \ copy2(&t3, &x4);\ copy2(&t2, &x5);\ copy2(&t1, &x1);\ copy2(&s1, &x7);\ copy2(&s0, &x0);\ ;\ xor2(&t3, &x6);\ xor2(&t2, &x7);\ xor2(&t1, &x3);\ xor2(&s1, &x6);\ xor2(&s0, &x2);\ ;\ copy2(&s2, &t3);\ copy2(&t0, &t2);\ copy2(&s3, &t3);\ ;\ or2(&t2, &t1);\ or2(&t3, &s0);\ xor2(&s3, &t0);\ and2(&s2, &s0);\ and2(&t0, &t1);\ xor2(&s0, &t1);\ and2(&s3, &s0);\ copy2(&s0, &x3);\ xor2(&s0, &x2);\ and2(&s1, &s0);\ xor2(&t3, &s1);\ xor2(&t2, &s1);\ copy2(&s1, &x4);\ xor2(&s1, &x5);\ copy2(&s0, &x1);\ copy2(&t1, &s1);\ xor2(&s0, &x0);\ or2(&t1, &s0);\ and2(&s1, &s0);\ xor2(&t0, &s1);\ xor2(&t3, &s3);\ xor2(&t2, &s2);\ xor2(&t1, &s3);\ xor2(&t0, &s2);\ xor2(&t1, &s2);\ copy2(&s0, &x7);\ copy2(&s1, &x6);\ copy2(&s2, &x5);\ copy2(&s3, &x4);\ and2(&s0, &x3);\ and2(&s1, &x2);\ and2(&s2, &x1);\ or2(&s3, &x0);\ xor2(&t3, &s0);\ xor2(&t2, &s1);\ xor2(&t1, &s2);\ xor2(&t0, &s3);\ ;\ copy2(&s0, &t3);\ xor2(&s0, &t2);\ and2(&t3, &t1);\ copy2(&s2, &t0);\ xor2(&s2, &t3);\ copy2(&s3, &s0);\ and2(&s3, &s2);\ xor2(&s3, &t2);\ copy2(&s1, &t1);\ xor2(&s1, &t0);\ xor2(&t3, &t2);\ and2(&s1, &t3);\ xor2(&s1, &t0);\ xor2(&t1, &s1);\ copy2(&t2, &s2);\ xor2(&t2, &s1);\ and2(&t2, &t0);\ xor2(&t1, &t2);\ xor2(&s2, &t2);\ and2(&s2, &s3);\ xor2(&s2, &s0);\ ;\ Mul_GF16_2(x0, x1, x2, x3, x4, x5, x6, x7, s3, s2, s1, t1, s0, t0, t2, t3);\ #endif curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/portable/consts.c000066400000000000000000000021531150631715100257420ustar00rootroot00000000000000#include "consts.h" const unsigned char ROTB[16] = {0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08}; const unsigned char M0[16] = {0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02, 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00}; const unsigned char EXPB0[16] = {0x03, 0x03, 0x03, 0x03, 0x07, 0x07, 0x07, 0x07, 0x0b, 0x0b, 0x0b, 0x0b, 0x0f, 0x0f, 0x0f, 0x0f}; const unsigned char SWAP32[16] = {0x03, 0x02, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04, 0x0b, 0x0a, 0x09, 0x08, 0x0f, 0x0e, 0x0d, 0x0c}; const unsigned char M0SWAP[16] = {0x0c, 0x08, 0x04, 0x00, 0x0d, 0x09, 0x05, 0x01, 0x0e, 0x0a, 0x06, 0x02, 0x0f, 0x0b, 0x07, 0x03}; const unsigned char SR[16] = {0x01, 0x02, 0x03, 0x00, 0x06, 0x07, 0x04, 0x05, 0x0b, 0x08, 0x09, 0x0a, 0x0c, 0x0d, 0x0e, 0x0f}; const unsigned char SRM0[16] = {0x0f, 0x0a, 0x05, 0x00, 0x0e, 0x09, 0x04, 0x03, 0x0d, 0x08, 0x07, 0x02, 0x0c, 0x0b, 0x06, 0x01}; const int128 BS0 = {0x5555555555555555ULL, 0x5555555555555555ULL}; const int128 BS1 = {0x3333333333333333ULL, 0x3333333333333333ULL}; const int128 BS2 = {0x0f0f0f0f0f0f0f0fULL, 0x0f0f0f0f0f0f0f0fULL}; curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/portable/consts.h000066400000000000000000000016121150631715100257460ustar00rootroot00000000000000#ifndef CONSTS_H #define CONSTS_H #include "int128.h" #define ROTB crypto_stream_aes128ctr_portable_ROTB #define M0 crypto_stream_aes128ctr_portable_M0 #define EXPB0 crypto_stream_aes128ctr_portable_EXPB0 #define SWAP32 crypto_stream_aes128ctr_portable_SWAP32 #define M0SWAP crypto_stream_aes128ctr_portable_M0SWAP #define SR crypto_stream_aes128ctr_portable_SR #define SRM0 crypto_stream_aes128ctr_portable_SRM0 #define BS0 crypto_stream_aes128ctr_portable_BS0 #define BS1 crypto_stream_aes128ctr_portable_BS1 #define BS2 crypto_stream_aes128ctr_portable_BS2 extern const unsigned char ROTB[16]; extern const unsigned char M0[16]; extern const unsigned char EXPB0[16]; extern const unsigned char SWAP32[16]; extern const 
unsigned char M0SWAP[16]; extern const unsigned char SR[16]; extern const unsigned char SRM0[16]; extern const int128 BS0; extern const int128 BS1; extern const int128 BS2; #endif curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/portable/int128.c000066400000000000000000000046601150631715100254630ustar00rootroot00000000000000#include "int128.h" #include "common.h" void xor2(int128 *r, const int128 *x) { r->a ^= x->a; r->b ^= x->b; } void and2(int128 *r, const int128 *x) { r->a &= x->a; r->b &= x->b; } void or2(int128 *r, const int128 *x) { r->a |= x->a; r->b |= x->b; } void copy2(int128 *r, const int128 *x) { r->a = x->a; r->b = x->b; } void shufb(int128 *r, const unsigned char *l) { int128 t; copy2(&t,r); unsigned char *cr = (unsigned char *)r; unsigned char *ct = (unsigned char *)&t; cr[0] = ct[l[0]]; cr[1] = ct[l[1]]; cr[2] = ct[l[2]]; cr[3] = ct[l[3]]; cr[4] = ct[l[4]]; cr[5] = ct[l[5]]; cr[6] = ct[l[6]]; cr[7] = ct[l[7]]; cr[8] = ct[l[8]]; cr[9] = ct[l[9]]; cr[10] = ct[l[10]]; cr[11] = ct[l[11]]; cr[12] = ct[l[12]]; cr[13] = ct[l[13]]; cr[14] = ct[l[14]]; cr[15] = ct[l[15]]; } void shufd(int128 *r, const int128 *x, const unsigned int c) { int128 t; uint32 *tp = (uint32 *)&t; uint32 *xp = (uint32 *)x; tp[0] = xp[c&3]; tp[1] = xp[(c>>2)&3]; tp[2] = xp[(c>>4)&3]; tp[3] = xp[(c>>6)&3]; copy2(r,&t); } void rshift32_littleendian(int128 *r, const unsigned int n) { unsigned char *rp = (unsigned char *)r; uint32 t; t = load32_littleendian(rp); t >>= n; store32_littleendian(rp, t); t = load32_littleendian(rp+4); t >>= n; store32_littleendian(rp+4, t); t = load32_littleendian(rp+8); t >>= n; store32_littleendian(rp+8, t); t = load32_littleendian(rp+12); t >>= n; store32_littleendian(rp+12, t); } void rshift64_littleendian(int128 *r, const unsigned int n) { unsigned char *rp = (unsigned char *)r; uint64 t; t = load64_littleendian(rp); t >>= n; store64_littleendian(rp, t); t = load64_littleendian(rp+8); t >>= n; store64_littleendian(rp+8, t); } void lshift64_littleendian(int128 *r, const unsigned int n) { unsigned char *rp = (unsigned char *)r; uint64 t; t = load64_littleendian(rp); t <<= n; store64_littleendian(rp, t); t = load64_littleendian(rp+8); t <<= n; store64_littleendian(rp+8, t); } void toggle(int128 *r) { r->a ^= 0xffffffffffffffffULL; r->b ^= 0xffffffffffffffffULL; } void xor_rcon(int128 *r) { unsigned char *rp = (unsigned char *)r; uint32 t; t = load32_littleendian(rp+12); t ^= 0xffffffff; store32_littleendian(rp+12, t); } void add_uint32_big(int128 *r, uint32 x) { unsigned char *rp = (unsigned char *)r; uint32 t; t = load32_littleendian(rp+12); t += x; store32_littleendian(rp+12, t); } curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/portable/int128.h000066400000000000000000000026401150631715100254640ustar00rootroot00000000000000#ifndef INT128_H #define INT128_H #include "common.h" typedef struct{ unsigned long long a; unsigned long long b; } int128; #define xor2 crypto_stream_aes128ctr_portable_xor2 void xor2(int128 *r, const int128 *x); #define and2 crypto_stream_aes128ctr_portable_and2 void and2(int128 *r, const int128 *x); #define or2 crypto_stream_aes128ctr_portable_or2 void or2(int128 *r, const int128 *x); #define copy2 crypto_stream_aes128ctr_portable_copy2 void copy2(int128 *r, const int128 *x); #define shufb crypto_stream_aes128ctr_portable_shufb void shufb(int128 *r, const unsigned char *l); #define shufd crypto_stream_aes128ctr_portable_shufd void shufd(int128 *r, const int128 *x, const unsigned int c); #define rshift32_littleendian 
crypto_stream_aes128ctr_portable_rshift32_littleendian void rshift32_littleendian(int128 *r, const unsigned int n); #define rshift64_littleendian crypto_stream_aes128ctr_portable_rshift64_littleendian void rshift64_littleendian(int128 *r, const unsigned int n); #define lshift64_littleendian crypto_stream_aes128ctr_portable_lshift64_littleendian void lshift64_littleendian(int128 *r, const unsigned int n); #define toggle crypto_stream_aes128ctr_portable_toggle void toggle(int128 *r); #define xor_rcon crypto_stream_aes128ctr_portable_xor_rcon void xor_rcon(int128 *r); #define add_uint32_big crypto_stream_aes128ctr_portable_add_uint32_big void add_uint32_big(int128 *r, uint32 x); #endif curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/portable/stream.c000066400000000000000000000012461150631715100257260ustar00rootroot00000000000000#include "crypto_stream.h" int crypto_stream( unsigned char *out, unsigned long long outlen, const unsigned char *n, const unsigned char *k ) { unsigned char d[crypto_stream_BEFORENMBYTES]; crypto_stream_beforenm(d, k); crypto_stream_afternm(out, outlen, n, d); return 0; } int crypto_stream_xor( unsigned char *out, const unsigned char *in, unsigned long long inlen, const unsigned char *n, const unsigned char *k ) { unsigned char d[crypto_stream_BEFORENMBYTES]; crypto_stream_beforenm(d, k); crypto_stream_xor_afternm(out, in, inlen, n, d); return 0; } curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/portable/types.h000066400000000000000000000002341150631715100256000ustar00rootroot00000000000000#ifndef TYPES_H #define TYPES_H #include "crypto_uint32.h" typedef crypto_uint32 uint32; #include "crypto_uint64.h" typedef crypto_uint64 uint64; #endif curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/portable/xor_afternm.c000066400000000000000000000120251150631715100267540ustar00rootroot00000000000000/* Author: Peter Schwabe, ported from an assembly implementation by Emilia Käsper * Date: 2009-03-19 * Public domain */ #include #include "int128.h" #include "common.h" #include "consts.h" #include "crypto_stream.h" int crypto_stream_xor_afternm(unsigned char *outp, const unsigned char *inp, unsigned long long len, const unsigned char *noncep, const unsigned char *c) { int128 xmm0; int128 xmm1; int128 xmm2; int128 xmm3; int128 xmm4; int128 xmm5; int128 xmm6; int128 xmm7; int128 xmm8; int128 xmm9; int128 xmm10; int128 xmm11; int128 xmm12; int128 xmm13; int128 xmm14; int128 xmm15; int128 nonce_stack; unsigned long long lensav; unsigned char bl[128]; unsigned char *blp; unsigned char b; uint32 tmp; /* Copy nonce on the stack */ copy2(&nonce_stack, (int128 *) (noncep + 0)); unsigned char *np = (unsigned char *)&nonce_stack; enc_block: xmm0 = *(int128 *) (np + 0); copy2(&xmm1, &xmm0); shufb(&xmm1, SWAP32); copy2(&xmm2, &xmm1); copy2(&xmm3, &xmm1); copy2(&xmm4, &xmm1); copy2(&xmm5, &xmm1); copy2(&xmm6, &xmm1); copy2(&xmm7, &xmm1); add_uint32_big(&xmm1, 1); add_uint32_big(&xmm2, 2); add_uint32_big(&xmm3, 3); add_uint32_big(&xmm4, 4); add_uint32_big(&xmm5, 5); add_uint32_big(&xmm6, 6); add_uint32_big(&xmm7, 7); shufb(&xmm0, M0); shufb(&xmm1, M0SWAP); shufb(&xmm2, M0SWAP); shufb(&xmm3, M0SWAP); shufb(&xmm4, M0SWAP); shufb(&xmm5, M0SWAP); shufb(&xmm6, M0SWAP); shufb(&xmm7, M0SWAP); bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, xmm8) aesround( 1, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) aesround( 2, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) aesround( 
3, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) aesround( 4, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) aesround( 5, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) aesround( 6, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) aesround( 7, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) aesround( 8, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) aesround( 9, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) lastround(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) bitslice(xmm13, xmm10, xmm15, xmm11, xmm14, xmm12, xmm9, xmm8, xmm0) if(len < 128) goto partial; if(len == 128) goto full; tmp = load32_bigendian(np + 12); tmp += 8; store32_bigendian(np + 12, tmp); xor2(&xmm8, (int128 *)(inp + 0)); xor2(&xmm9, (int128 *)(inp + 16)); xor2(&xmm12, (int128 *)(inp + 32)); xor2(&xmm14, (int128 *)(inp + 48)); xor2(&xmm11, (int128 *)(inp + 64)); xor2(&xmm15, (int128 *)(inp + 80)); xor2(&xmm10, (int128 *)(inp + 96)); xor2(&xmm13, (int128 *)(inp + 112)); *(int128 *) (outp + 0) = xmm8; *(int128 *) (outp + 16) = xmm9; *(int128 *) (outp + 32) = xmm12; *(int128 *) (outp + 48) = xmm14; *(int128 *) (outp + 64) = xmm11; *(int128 *) (outp + 80) = xmm15; *(int128 *) (outp + 96) = xmm10; *(int128 *) (outp + 112) = xmm13; len -= 128; inp += 128; outp += 128; goto enc_block; partial: lensav = len; len >>= 4; tmp = load32_bigendian(np + 12); tmp += len; store32_bigendian(np + 12, tmp); blp = bl; *(int128 *)(blp + 0) = xmm8; *(int128 *)(blp + 16) = xmm9; *(int128 *)(blp + 32) = xmm12; *(int128 *)(blp + 48) = xmm14; *(int128 *)(blp + 64) = xmm11; *(int128 *)(blp + 80) = xmm15; *(int128 *)(blp + 96) = xmm10; *(int128 *)(blp + 112) = xmm13; bytes: if(lensav == 0) goto end; b = blp[0]; b ^= *(unsigned char *)(inp + 0); *(unsigned char *)(outp + 0) = b; blp += 1; inp +=1; outp +=1; lensav -= 1; goto bytes; full: tmp = load32_bigendian(np + 12); tmp += 8; store32_bigendian(np + 12, tmp); xor2(&xmm8, (int128 *)(inp + 0)); xor2(&xmm9, (int128 *)(inp + 16)); xor2(&xmm12, (int128 *)(inp + 32)); xor2(&xmm14, (int128 *)(inp + 48)); xor2(&xmm11, (int128 *)(inp + 64)); xor2(&xmm15, (int128 *)(inp + 80)); xor2(&xmm10, (int128 *)(inp + 96)); xor2(&xmm13, (int128 *)(inp + 112)); *(int128 *) (outp + 0) = xmm8; *(int128 *) (outp + 16) = xmm9; *(int128 *) (outp + 32) = xmm12; *(int128 *) (outp + 48) = xmm14; *(int128 *) (outp + 64) = xmm11; *(int128 *) (outp + 80) = xmm15; *(int128 *) (outp + 96) = xmm10; *(int128 *) (outp + 112) = xmm13; end: return 0; } curvedns-curvedns-0.87/nacl/crypto_stream/aes128ctr/used000066400000000000000000000000001150631715100233250ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/measure.c000066400000000000000000000036311150631715100225500ustar00rootroot00000000000000#include #include "randombytes.h" #include "cpucycles.h" #include "crypto_stream.h" extern void printentry(long long,const char *,long long *,long long); extern unsigned char *alignedcalloc(unsigned long long); extern const char *primitiveimplementation; extern const char *implementationversion; extern const char *sizenames[]; extern const long long sizes[]; extern void 
allocate(void); extern void measure(void); const char *primitiveimplementation = crypto_stream_IMPLEMENTATION; const char *implementationversion = crypto_stream_VERSION; const char *sizenames[] = { "keybytes", "noncebytes", 0 }; const long long sizes[] = { crypto_stream_KEYBYTES, crypto_stream_NONCEBYTES }; #define MAXTEST_BYTES 4096 #ifdef SUPERCOP #define MGAP 8192 #else #define MGAP 8 #endif static unsigned char *k; static unsigned char *n; static unsigned char *m; static unsigned char *c; void preallocate(void) { } void allocate(void) { k = alignedcalloc(crypto_stream_KEYBYTES); n = alignedcalloc(crypto_stream_NONCEBYTES); m = alignedcalloc(MAXTEST_BYTES); c = alignedcalloc(MAXTEST_BYTES); } #define TIMINGS 15 static long long cycles[TIMINGS + 1]; void measure(void) { int i; int loop; int mlen; for (loop = 0;loop < LOOPS;++loop) { for (mlen = 0;mlen <= MAXTEST_BYTES;mlen += 1 + mlen / MGAP) { randombytes(k,crypto_stream_KEYBYTES); randombytes(n,crypto_stream_NONCEBYTES); randombytes(m,mlen); randombytes(c,mlen); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_stream(c,mlen,n,k); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_stream_xor(c,m,mlen,n,k); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(mlen,"xor_cycles",cycles,TIMINGS); } } } curvedns-curvedns-0.87/nacl/crypto_stream/salsa20/000077500000000000000000000000001150631715100222055ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/salsa20/amd64_xmm6/000077500000000000000000000000001150631715100240675ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/salsa20/amd64_xmm6/api.h000066400000000000000000000000671150631715100250140ustar00rootroot00000000000000#define CRYPTO_KEYBYTES 32 #define CRYPTO_NONCEBYTES 8 curvedns-curvedns-0.87/nacl/crypto_stream/salsa20/amd64_xmm6/stream.s000066400000000000000000003700761150631715100255630ustar00rootroot00000000000000 # qhasm: int64 r11_caller # qhasm: int64 r12_caller # qhasm: int64 r13_caller # qhasm: int64 r14_caller # qhasm: int64 r15_caller # qhasm: int64 rbx_caller # qhasm: int64 rbp_caller # qhasm: caller r11_caller # qhasm: caller r12_caller # qhasm: caller r13_caller # qhasm: caller r14_caller # qhasm: caller r15_caller # qhasm: caller rbx_caller # qhasm: caller rbp_caller # qhasm: stack64 r11_stack # qhasm: stack64 r12_stack # qhasm: stack64 r13_stack # qhasm: stack64 r14_stack # qhasm: stack64 r15_stack # qhasm: stack64 rbx_stack # qhasm: stack64 rbp_stack # qhasm: int64 a # qhasm: int64 arg1 # qhasm: int64 arg2 # qhasm: int64 arg3 # qhasm: int64 arg4 # qhasm: int64 arg5 # qhasm: input arg1 # qhasm: input arg2 # qhasm: input arg3 # qhasm: input arg4 # qhasm: input arg5 # qhasm: int64 k # qhasm: int64 kbits # qhasm: int64 iv # qhasm: int64 i # qhasm: stack128 x0 # qhasm: stack128 x1 # qhasm: stack128 x2 # qhasm: stack128 x3 # qhasm: int64 m # qhasm: int64 out # qhasm: int64 bytes # qhasm: stack32 eax_stack # qhasm: stack32 ebx_stack # qhasm: stack32 esi_stack # qhasm: stack32 edi_stack # qhasm: stack32 ebp_stack # qhasm: int6464 diag0 # qhasm: int6464 diag1 # qhasm: int6464 diag2 # qhasm: int6464 diag3 # qhasm: int6464 a0 # qhasm: int6464 a1 # qhasm: int6464 a2 # qhasm: int6464 a3 # qhasm: int6464 a4 # qhasm: int6464 a5 # qhasm: int6464 a6 # qhasm: int6464 a7 # qhasm: int6464 b0 # qhasm: int6464 b1 # qhasm: int6464 b2 # qhasm: int6464 b3 # qhasm: int6464 b4 # qhasm: int6464 
b5 # qhasm: int6464 b6 # qhasm: int6464 b7 # qhasm: int6464 z0 # qhasm: int6464 z1 # qhasm: int6464 z2 # qhasm: int6464 z3 # qhasm: int6464 z4 # qhasm: int6464 z5 # qhasm: int6464 z6 # qhasm: int6464 z7 # qhasm: int6464 z8 # qhasm: int6464 z9 # qhasm: int6464 z10 # qhasm: int6464 z11 # qhasm: int6464 z12 # qhasm: int6464 z13 # qhasm: int6464 z14 # qhasm: int6464 z15 # qhasm: stack128 z0_stack # qhasm: stack128 z1_stack # qhasm: stack128 z2_stack # qhasm: stack128 z3_stack # qhasm: stack128 z4_stack # qhasm: stack128 z5_stack # qhasm: stack128 z6_stack # qhasm: stack128 z7_stack # qhasm: stack128 z8_stack # qhasm: stack128 z9_stack # qhasm: stack128 z10_stack # qhasm: stack128 z11_stack # qhasm: stack128 z12_stack # qhasm: stack128 z13_stack # qhasm: stack128 z14_stack # qhasm: stack128 z15_stack # qhasm: int6464 y0 # qhasm: int6464 y1 # qhasm: int6464 y2 # qhasm: int6464 y3 # qhasm: int6464 y4 # qhasm: int6464 y5 # qhasm: int6464 y6 # qhasm: int6464 y7 # qhasm: int6464 y8 # qhasm: int6464 y9 # qhasm: int6464 y10 # qhasm: int6464 y11 # qhasm: int6464 y12 # qhasm: int6464 y13 # qhasm: int6464 y14 # qhasm: int6464 y15 # qhasm: int6464 r0 # qhasm: int6464 r1 # qhasm: int6464 r2 # qhasm: int6464 r3 # qhasm: int6464 r4 # qhasm: int6464 r5 # qhasm: int6464 r6 # qhasm: int6464 r7 # qhasm: int6464 r8 # qhasm: int6464 r9 # qhasm: int6464 r10 # qhasm: int6464 r11 # qhasm: int6464 r12 # qhasm: int6464 r13 # qhasm: int6464 r14 # qhasm: int6464 r15 # qhasm: stack128 orig0 # qhasm: stack128 orig1 # qhasm: stack128 orig2 # qhasm: stack128 orig3 # qhasm: stack128 orig4 # qhasm: stack128 orig5 # qhasm: stack128 orig6 # qhasm: stack128 orig7 # qhasm: stack128 orig8 # qhasm: stack128 orig9 # qhasm: stack128 orig10 # qhasm: stack128 orig11 # qhasm: stack128 orig12 # qhasm: stack128 orig13 # qhasm: stack128 orig14 # qhasm: stack128 orig15 # qhasm: int64 in0 # qhasm: int64 in1 # qhasm: int64 in2 # qhasm: int64 in3 # qhasm: int64 in4 # qhasm: int64 in5 # qhasm: int64 in6 # qhasm: int64 in7 # qhasm: int64 in8 # qhasm: int64 in9 # qhasm: int64 in10 # qhasm: int64 in11 # qhasm: int64 in12 # qhasm: int64 in13 # qhasm: int64 in14 # qhasm: int64 in15 # qhasm: stack512 tmp # qhasm: int64 ctarget # qhasm: stack64 bytes_backup # qhasm: enter crypto_stream_salsa20_amd64_xmm6 .text .p2align 5 .globl _crypto_stream_salsa20_amd64_xmm6 .globl crypto_stream_salsa20_amd64_xmm6 _crypto_stream_salsa20_amd64_xmm6: crypto_stream_salsa20_amd64_xmm6: mov %rsp,%r11 and $31,%r11 add $480,%r11 sub %r11,%rsp # qhasm: r11_stack = r11_caller # asm 1: movq r11_stack=stack64#1 # asm 2: movq r11_stack=352(%rsp) movq %r11,352(%rsp) # qhasm: r12_stack = r12_caller # asm 1: movq r12_stack=stack64#2 # asm 2: movq r12_stack=360(%rsp) movq %r12,360(%rsp) # qhasm: r13_stack = r13_caller # asm 1: movq r13_stack=stack64#3 # asm 2: movq r13_stack=368(%rsp) movq %r13,368(%rsp) # qhasm: r14_stack = r14_caller # asm 1: movq r14_stack=stack64#4 # asm 2: movq r14_stack=376(%rsp) movq %r14,376(%rsp) # qhasm: r15_stack = r15_caller # asm 1: movq r15_stack=stack64#5 # asm 2: movq r15_stack=384(%rsp) movq %r15,384(%rsp) # qhasm: rbx_stack = rbx_caller # asm 1: movq rbx_stack=stack64#6 # asm 2: movq rbx_stack=392(%rsp) movq %rbx,392(%rsp) # qhasm: rbp_stack = rbp_caller # asm 1: movq rbp_stack=stack64#7 # asm 2: movq rbp_stack=400(%rsp) movq %rbp,400(%rsp) # qhasm: bytes = arg2 # asm 1: mov bytes=int64#6 # asm 2: mov bytes=%r9 mov %rsi,%r9 # qhasm: out = arg1 # asm 1: mov out=int64#1 # asm 2: mov out=%rdi mov %rdi,%rdi # qhasm: m = out # asm 1: mov m=int64#2 # 
asm 2: mov m=%rsi mov %rdi,%rsi # qhasm: iv = arg3 # asm 1: mov iv=int64#3 # asm 2: mov iv=%rdx mov %rdx,%rdx # qhasm: k = arg4 # asm 1: mov k=int64#8 # asm 2: mov k=%r10 mov %rcx,%r10 # qhasm: unsigned>? bytes - 0 # asm 1: cmp $0, jbe ._done # qhasm: a = 0 # asm 1: mov $0,>a=int64#7 # asm 2: mov $0,>a=%rax mov $0,%rax # qhasm: i = bytes # asm 1: mov i=int64#4 # asm 2: mov i=%rcx mov %r9,%rcx # qhasm: while (i) { *out++ = a; --i } rep stosb # qhasm: out -= bytes # asm 1: sub r11_stack=stack64#1 # asm 2: movq r11_stack=352(%rsp) movq %r11,352(%rsp) # qhasm: r12_stack = r12_caller # asm 1: movq r12_stack=stack64#2 # asm 2: movq r12_stack=360(%rsp) movq %r12,360(%rsp) # qhasm: r13_stack = r13_caller # asm 1: movq r13_stack=stack64#3 # asm 2: movq r13_stack=368(%rsp) movq %r13,368(%rsp) # qhasm: r14_stack = r14_caller # asm 1: movq r14_stack=stack64#4 # asm 2: movq r14_stack=376(%rsp) movq %r14,376(%rsp) # qhasm: r15_stack = r15_caller # asm 1: movq r15_stack=stack64#5 # asm 2: movq r15_stack=384(%rsp) movq %r15,384(%rsp) # qhasm: rbx_stack = rbx_caller # asm 1: movq rbx_stack=stack64#6 # asm 2: movq rbx_stack=392(%rsp) movq %rbx,392(%rsp) # qhasm: rbp_stack = rbp_caller # asm 1: movq rbp_stack=stack64#7 # asm 2: movq rbp_stack=400(%rsp) movq %rbp,400(%rsp) # qhasm: out = arg1 # asm 1: mov out=int64#1 # asm 2: mov out=%rdi mov %rdi,%rdi # qhasm: m = arg2 # asm 1: mov m=int64#2 # asm 2: mov m=%rsi mov %rsi,%rsi # qhasm: bytes = arg3 # asm 1: mov bytes=int64#6 # asm 2: mov bytes=%r9 mov %rdx,%r9 # qhasm: iv = arg4 # asm 1: mov iv=int64#3 # asm 2: mov iv=%rdx mov %rcx,%rdx # qhasm: k = arg5 # asm 1: mov k=int64#8 # asm 2: mov k=%r10 mov %r8,%r10 # qhasm: unsigned>? bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: start: ._start: # qhasm: in12 = *(uint32 *) (k + 20) # asm 1: movl 20(in12=int64#4d # asm 2: movl 20(in12=%ecx movl 20(%r10),%ecx # qhasm: in1 = *(uint32 *) (k + 0) # asm 1: movl 0(in1=int64#5d # asm 2: movl 0(in1=%r8d movl 0(%r10),%r8d # qhasm: in6 = *(uint32 *) (iv + 0) # asm 1: movl 0(in6=int64#7d # asm 2: movl 0(in6=%eax movl 0(%rdx),%eax # qhasm: in11 = *(uint32 *) (k + 16) # asm 1: movl 16(in11=int64#9d # asm 2: movl 16(in11=%r11d movl 16(%r10),%r11d # qhasm: ((uint32 *)&x1)[0] = in12 # asm 1: movl x1=stack128#1 # asm 2: movl x1=0(%rsp) movl %ecx,0(%rsp) # qhasm: ((uint32 *)&x1)[1] = in1 # asm 1: movl in8=int64#4 # asm 2: mov $0,>in8=%rcx mov $0,%rcx # qhasm: in13 = *(uint32 *) (k + 24) # asm 1: movl 24(in13=int64#5d # asm 2: movl 24(in13=%r8d movl 24(%r10),%r8d # qhasm: in2 = *(uint32 *) (k + 4) # asm 1: movl 4(in2=int64#7d # asm 2: movl 4(in2=%eax movl 4(%r10),%eax # qhasm: in7 = *(uint32 *) (iv + 4) # asm 1: movl 4(in7=int64#3d # asm 2: movl 4(in7=%edx movl 4(%rdx),%edx # qhasm: ((uint32 *)&x2)[0] = in8 # asm 1: movl x2=stack128#2 # asm 2: movl x2=16(%rsp) movl %ecx,16(%rsp) # qhasm: ((uint32 *)&x2)[1] = in13 # asm 1: movl in4=int64#3d # asm 2: movl 12(in4=%edx movl 12(%r10),%edx # qhasm: in9 = 0 # asm 1: mov $0,>in9=int64#4 # asm 2: mov $0,>in9=%rcx mov $0,%rcx # qhasm: in14 = *(uint32 *) (k + 28) # asm 1: movl 28(in14=int64#5d # asm 2: movl 28(in14=%r8d movl 28(%r10),%r8d # qhasm: in3 = *(uint32 *) (k + 8) # asm 1: movl 8(in3=int64#7d # asm 2: movl 8(in3=%eax movl 8(%r10),%eax # qhasm: ((uint32 *)&x3)[0] = in4 # asm 1: movl x3=stack128#3 # asm 2: movl x3=32(%rsp) movl %edx,32(%rsp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl in0=int64#3 # asm 2: mov $1634760805,>in0=%rdx mov $1634760805,%rdx # qhasm: in5 = 857760878 # asm 1: mov 
$857760878,>in5=int64#4 # asm 2: mov $857760878,>in5=%rcx mov $857760878,%rcx # qhasm: in10 = 2036477234 # asm 1: mov $2036477234,>in10=int64#5 # asm 2: mov $2036477234,>in10=%r8 mov $2036477234,%r8 # qhasm: in15 = 1797285236 # asm 1: mov $1797285236,>in15=int64#7 # asm 2: mov $1797285236,>in15=%rax mov $1797285236,%rax # qhasm: ((uint32 *)&x0)[0] = in0 # asm 1: movl x0=stack128#4 # asm 2: movl x0=48(%rsp) movl %edx,48(%rsp) # qhasm: ((uint32 *)&x0)[1] = in5 # asm 1: movl z0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 48(%rsp),%xmm0 # qhasm: z5 = z0[1,1,1,1] # asm 1: pshufd $0x55,z5=int6464#2 # asm 2: pshufd $0x55,z5=%xmm1 pshufd $0x55,%xmm0,%xmm1 # qhasm: z10 = z0[2,2,2,2] # asm 1: pshufd $0xaa,z10=int6464#3 # asm 2: pshufd $0xaa,z10=%xmm2 pshufd $0xaa,%xmm0,%xmm2 # qhasm: z15 = z0[3,3,3,3] # asm 1: pshufd $0xff,z15=int6464#4 # asm 2: pshufd $0xff,z15=%xmm3 pshufd $0xff,%xmm0,%xmm3 # qhasm: z0 = z0[0,0,0,0] # asm 1: pshufd $0x00,z0=int6464#1 # asm 2: pshufd $0x00,z0=%xmm0 pshufd $0x00,%xmm0,%xmm0 # qhasm: orig5 = z5 # asm 1: movdqa orig5=stack128#5 # asm 2: movdqa orig5=64(%rsp) movdqa %xmm1,64(%rsp) # qhasm: orig10 = z10 # asm 1: movdqa orig10=stack128#6 # asm 2: movdqa orig10=80(%rsp) movdqa %xmm2,80(%rsp) # qhasm: orig15 = z15 # asm 1: movdqa orig15=stack128#7 # asm 2: movdqa orig15=96(%rsp) movdqa %xmm3,96(%rsp) # qhasm: orig0 = z0 # asm 1: movdqa orig0=stack128#8 # asm 2: movdqa orig0=112(%rsp) movdqa %xmm0,112(%rsp) # qhasm: z1 = x1 # asm 1: movdqa z1=int6464#1 # asm 2: movdqa z1=%xmm0 movdqa 0(%rsp),%xmm0 # qhasm: z6 = z1[2,2,2,2] # asm 1: pshufd $0xaa,z6=int6464#2 # asm 2: pshufd $0xaa,z6=%xmm1 pshufd $0xaa,%xmm0,%xmm1 # qhasm: z11 = z1[3,3,3,3] # asm 1: pshufd $0xff,z11=int6464#3 # asm 2: pshufd $0xff,z11=%xmm2 pshufd $0xff,%xmm0,%xmm2 # qhasm: z12 = z1[0,0,0,0] # asm 1: pshufd $0x00,z12=int6464#4 # asm 2: pshufd $0x00,z12=%xmm3 pshufd $0x00,%xmm0,%xmm3 # qhasm: z1 = z1[1,1,1,1] # asm 1: pshufd $0x55,z1=int6464#1 # asm 2: pshufd $0x55,z1=%xmm0 pshufd $0x55,%xmm0,%xmm0 # qhasm: orig6 = z6 # asm 1: movdqa orig6=stack128#9 # asm 2: movdqa orig6=128(%rsp) movdqa %xmm1,128(%rsp) # qhasm: orig11 = z11 # asm 1: movdqa orig11=stack128#10 # asm 2: movdqa orig11=144(%rsp) movdqa %xmm2,144(%rsp) # qhasm: orig12 = z12 # asm 1: movdqa orig12=stack128#11 # asm 2: movdqa orig12=160(%rsp) movdqa %xmm3,160(%rsp) # qhasm: orig1 = z1 # asm 1: movdqa orig1=stack128#12 # asm 2: movdqa orig1=176(%rsp) movdqa %xmm0,176(%rsp) # qhasm: z2 = x2 # asm 1: movdqa z2=int6464#1 # asm 2: movdqa z2=%xmm0 movdqa 16(%rsp),%xmm0 # qhasm: z7 = z2[3,3,3,3] # asm 1: pshufd $0xff,z7=int6464#2 # asm 2: pshufd $0xff,z7=%xmm1 pshufd $0xff,%xmm0,%xmm1 # qhasm: z13 = z2[1,1,1,1] # asm 1: pshufd $0x55,z13=int6464#3 # asm 2: pshufd $0x55,z13=%xmm2 pshufd $0x55,%xmm0,%xmm2 # qhasm: z2 = z2[2,2,2,2] # asm 1: pshufd $0xaa,z2=int6464#1 # asm 2: pshufd $0xaa,z2=%xmm0 pshufd $0xaa,%xmm0,%xmm0 # qhasm: orig7 = z7 # asm 1: movdqa orig7=stack128#13 # asm 2: movdqa orig7=192(%rsp) movdqa %xmm1,192(%rsp) # qhasm: orig13 = z13 # asm 1: movdqa orig13=stack128#14 # asm 2: movdqa orig13=208(%rsp) movdqa %xmm2,208(%rsp) # qhasm: orig2 = z2 # asm 1: movdqa orig2=stack128#15 # asm 2: movdqa orig2=224(%rsp) movdqa %xmm0,224(%rsp) # qhasm: z3 = x3 # asm 1: movdqa z3=int6464#1 # asm 2: movdqa z3=%xmm0 movdqa 32(%rsp),%xmm0 # qhasm: z4 = z3[0,0,0,0] # asm 1: pshufd $0x00,z4=int6464#2 # asm 2: pshufd $0x00,z4=%xmm1 pshufd $0x00,%xmm0,%xmm1 # qhasm: z14 = z3[2,2,2,2] # asm 1: pshufd $0xaa,z14=int6464#3 # asm 2: pshufd $0xaa,z14=%xmm2 pshufd 
$0xaa,%xmm0,%xmm2 # qhasm: z3 = z3[3,3,3,3] # asm 1: pshufd $0xff,z3=int6464#1 # asm 2: pshufd $0xff,z3=%xmm0 pshufd $0xff,%xmm0,%xmm0 # qhasm: orig4 = z4 # asm 1: movdqa orig4=stack128#16 # asm 2: movdqa orig4=240(%rsp) movdqa %xmm1,240(%rsp) # qhasm: orig14 = z14 # asm 1: movdqa orig14=stack128#17 # asm 2: movdqa orig14=256(%rsp) movdqa %xmm2,256(%rsp) # qhasm: orig3 = z3 # asm 1: movdqa orig3=stack128#18 # asm 2: movdqa orig3=272(%rsp) movdqa %xmm0,272(%rsp) # qhasm: bytesatleast256: ._bytesatleast256: # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int64#3d # asm 2: movl in8=%edx movl 16(%rsp),%edx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int64#4d # asm 2: movl 4+in9=%ecx movl 4+32(%rsp),%ecx # qhasm: ((uint32 *) &orig8)[0] = in8 # asm 1: movl orig8=stack128#19 # asm 2: movl orig8=288(%rsp) movl %edx,288(%rsp) # qhasm: ((uint32 *) &orig9)[0] = in9 # asm 1: movl orig9=stack128#20 # asm 2: movl orig9=304(%rsp) movl %ecx,304(%rsp) # qhasm: in8 += 1 # asm 1: add $1,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,x2=stack128#2 # asm 2: movl x2=16(%rsp) movl %edx,16(%rsp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl bytes_backup=stack64#8 # asm 2: movq bytes_backup=408(%rsp) movq %r9,408(%rsp) # qhasm: i = 20 # asm 1: mov $20,>i=int64#3 # asm 2: mov $20,>i=%rdx mov $20,%rdx # qhasm: z5 = orig5 # asm 1: movdqa z5=int6464#1 # asm 2: movdqa z5=%xmm0 movdqa 64(%rsp),%xmm0 # qhasm: z10 = orig10 # asm 1: movdqa z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 80(%rsp),%xmm1 # qhasm: z15 = orig15 # asm 1: movdqa z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 96(%rsp),%xmm2 # qhasm: z14 = orig14 # asm 1: movdqa z14=int6464#4 # asm 2: movdqa z14=%xmm3 movdqa 256(%rsp),%xmm3 # qhasm: z3 = orig3 # asm 1: movdqa z3=int6464#5 # asm 2: movdqa z3=%xmm4 movdqa 272(%rsp),%xmm4 # qhasm: z6 = orig6 # asm 1: movdqa z6=int6464#6 # asm 2: movdqa z6=%xmm5 movdqa 128(%rsp),%xmm5 # qhasm: z11 = orig11 # asm 1: movdqa z11=int6464#7 # asm 2: movdqa z11=%xmm6 movdqa 144(%rsp),%xmm6 # qhasm: z1 = orig1 # asm 1: movdqa z1=int6464#8 # asm 2: movdqa z1=%xmm7 movdqa 176(%rsp),%xmm7 # qhasm: z7 = orig7 # asm 1: movdqa z7=int6464#9 # asm 2: movdqa z7=%xmm8 movdqa 192(%rsp),%xmm8 # qhasm: z13 = orig13 # asm 1: movdqa z13=int6464#10 # asm 2: movdqa z13=%xmm9 movdqa 208(%rsp),%xmm9 # qhasm: z2 = orig2 # asm 1: movdqa z2=int6464#11 # asm 2: movdqa z2=%xmm10 movdqa 224(%rsp),%xmm10 # qhasm: z9 = orig9 # asm 1: movdqa z9=int6464#12 # asm 2: movdqa z9=%xmm11 movdqa 304(%rsp),%xmm11 # qhasm: z0 = orig0 # asm 1: movdqa z0=int6464#13 # asm 2: movdqa z0=%xmm12 movdqa 112(%rsp),%xmm12 # qhasm: z12 = orig12 # asm 1: movdqa z12=int6464#14 # asm 2: movdqa z12=%xmm13 movdqa 160(%rsp),%xmm13 # qhasm: z4 = orig4 # asm 1: movdqa z4=int6464#15 # asm 2: movdqa z4=%xmm14 movdqa 240(%rsp),%xmm14 # qhasm: z8 = orig8 # asm 1: movdqa z8=int6464#16 # asm 2: movdqa z8=%xmm15 movdqa 288(%rsp),%xmm15 # qhasm: mainloop1: ._mainloop1: # qhasm: z10_stack = z10 # asm 1: movdqa z10_stack=stack128#21 # asm 2: movdqa z10_stack=320(%rsp) movdqa %xmm1,320(%rsp) # qhasm: z15_stack = z15 # asm 1: movdqa z15_stack=stack128#22 # asm 2: movdqa z15_stack=336(%rsp) movdqa %xmm2,336(%rsp) # qhasm: y4 = z12 # asm 1: movdqa 
y4=int6464#2 # asm 2: movdqa y4=%xmm1 movdqa %xmm13,%xmm1 # qhasm: uint32323232 y4 += z0 # asm 1: paddd r4=int6464#3 # asm 2: movdqa r4=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y9=int6464#2 # asm 2: movdqa y9=%xmm1 movdqa %xmm7,%xmm1 # qhasm: uint32323232 y9 += z5 # asm 1: paddd r9=int6464#3 # asm 2: movdqa r9=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y9 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y8=int6464#2 # asm 2: movdqa y8=%xmm1 movdqa %xmm12,%xmm1 # qhasm: uint32323232 y8 += z4 # asm 1: paddd r8=int6464#3 # asm 2: movdqa r8=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y8 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y13=int6464#2 # asm 2: movdqa y13=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 y13 += z9 # asm 1: paddd r13=int6464#3 # asm 2: movdqa r13=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y13 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y12=int6464#2 # asm 2: movdqa y12=%xmm1 movdqa %xmm14,%xmm1 # qhasm: uint32323232 y12 += z8 # asm 1: paddd r12=int6464#3 # asm 2: movdqa r12=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y12 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y1=int6464#2 # asm 2: movdqa y1=%xmm1 movdqa %xmm11,%xmm1 # qhasm: uint32323232 y1 += z13 # asm 1: paddd r1=int6464#3 # asm 2: movdqa r1=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y1 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y0=int6464#2 # asm 2: movdqa y0=%xmm1 movdqa %xmm15,%xmm1 # qhasm: uint32323232 y0 += z12 # asm 1: paddd r0=int6464#3 # asm 2: movdqa r0=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y0 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 320(%rsp),%xmm1 # qhasm: z0_stack = z0 # asm 1: movdqa z0_stack=stack128#21 # asm 2: movdqa z0_stack=320(%rsp) movdqa %xmm12,320(%rsp) # qhasm: y5 = z13 # asm 1: movdqa y5=int6464#3 # asm 2: movdqa y5=%xmm2 movdqa %xmm9,%xmm2 # qhasm: uint32323232 y5 += z1 # asm 1: paddd r5=int6464#13 # asm 2: movdqa r5=%xmm12 movdqa %xmm2,%xmm12 # qhasm: uint32323232 y5 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,y14=int6464#3 # asm 2: movdqa y14=%xmm2 movdqa %xmm5,%xmm2 # qhasm: uint32323232 y14 += z10 # asm 1: paddd r14=int6464#13 # asm 2: movdqa r14=%xmm12 movdqa %xmm2,%xmm12 # qhasm: uint32323232 y14 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 336(%rsp),%xmm2 # qhasm: z5_stack = z5 # asm 1: movdqa z5_stack=stack128#22 # asm 2: movdqa z5_stack=336(%rsp) movdqa %xmm0,336(%rsp) # qhasm: y3 = z11 # asm 1: movdqa y3=int6464#1 # asm 2: movdqa y3=%xmm0 movdqa %xmm6,%xmm0 # qhasm: uint32323232 y3 += z15 # asm 1: paddd r3=int6464#13 # asm 2: movdqa r3=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y3 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y2=int6464#1 # asm 2: movdqa y2=%xmm0 movdqa %xmm1,%xmm0 # qhasm: uint32323232 y2 += z14 # asm 1: paddd r2=int6464#13 # asm 2: movdqa r2=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y2 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y7=int6464#1 # asm 2: movdqa y7=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 y7 += z3 # asm 1: paddd r7=int6464#13 # asm 2: movdqa r7=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y7 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y6=int6464#1 # asm 2: movdqa y6=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 y6 += z2 # asm 1: paddd r6=int6464#13 # asm 2: movdqa r6=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y11=int6464#1 # asm 2: 
movdqa y11=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 y11 += z7 # asm 1: paddd r11=int6464#13 # asm 2: movdqa r11=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y11 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y10=int6464#1 # asm 2: movdqa y10=%xmm0 movdqa %xmm10,%xmm0 # qhasm: uint32323232 y10 += z6 # asm 1: paddd r10=int6464#13 # asm 2: movdqa r10=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y10 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 320(%rsp),%xmm0 # qhasm: z10_stack = z10 # asm 1: movdqa z10_stack=stack128#21 # asm 2: movdqa z10_stack=320(%rsp) movdqa %xmm1,320(%rsp) # qhasm: y1 = z3 # asm 1: movdqa y1=int6464#2 # asm 2: movdqa y1=%xmm1 movdqa %xmm4,%xmm1 # qhasm: uint32323232 y1 += z0 # asm 1: paddd r1=int6464#13 # asm 2: movdqa r1=%xmm12 movdqa %xmm1,%xmm12 # qhasm: uint32323232 y1 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y15=int6464#2 # asm 2: movdqa y15=%xmm1 movdqa %xmm8,%xmm1 # qhasm: uint32323232 y15 += z11 # asm 1: paddd r15=int6464#13 # asm 2: movdqa r15=%xmm12 movdqa %xmm1,%xmm12 # qhasm: uint32323232 y15 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z5=int6464#13 # asm 2: movdqa z5=%xmm12 movdqa 336(%rsp),%xmm12 # qhasm: z15_stack = z15 # asm 1: movdqa z15_stack=stack128#22 # asm 2: movdqa z15_stack=336(%rsp) movdqa %xmm2,336(%rsp) # qhasm: y6 = z4 # asm 1: movdqa y6=int6464#2 # asm 2: movdqa y6=%xmm1 movdqa %xmm14,%xmm1 # qhasm: uint32323232 y6 += z5 # asm 1: paddd r6=int6464#3 # asm 2: movdqa r6=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y6 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y2=int6464#2 # asm 2: movdqa y2=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 y2 += z1 # asm 1: paddd r2=int6464#3 # asm 2: movdqa r2=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y2 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y7=int6464#2 # asm 2: movdqa y7=%xmm1 movdqa %xmm12,%xmm1 # qhasm: uint32323232 y7 += z6 # asm 1: paddd r7=int6464#3 # asm 2: movdqa r7=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y7 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y3=int6464#2 # asm 2: movdqa y3=%xmm1 movdqa %xmm7,%xmm1 # qhasm: uint32323232 y3 += z2 # asm 1: paddd r3=int6464#3 # asm 2: movdqa r3=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y3 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y4=int6464#2 # asm 2: movdqa y4=%xmm1 movdqa %xmm5,%xmm1 # qhasm: uint32323232 y4 += z7 # asm 1: paddd r4=int6464#3 # asm 2: movdqa r4=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y4 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y0=int6464#2 # asm 2: movdqa y0=%xmm1 movdqa %xmm10,%xmm1 # qhasm: uint32323232 y0 += z3 # asm 1: paddd r0=int6464#3 # asm 2: movdqa r0=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y0 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 320(%rsp),%xmm1 # qhasm: z0_stack = z0 # asm 1: movdqa z0_stack=stack128#21 # asm 2: movdqa z0_stack=320(%rsp) movdqa %xmm0,320(%rsp) # qhasm: y5 = z7 # asm 1: movdqa y5=int6464#1 # asm 2: movdqa y5=%xmm0 movdqa %xmm8,%xmm0 # qhasm: uint32323232 y5 += z4 # asm 1: paddd r5=int6464#3 # asm 2: movdqa r5=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 y5 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,y11=int6464#1 # asm 2: movdqa y11=%xmm0 movdqa %xmm11,%xmm0 # qhasm: uint32323232 y11 += z10 # asm 1: paddd r11=int6464#3 # asm 2: movdqa r11=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 y11 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 336(%rsp),%xmm2 
# qhasm: z5_stack = z5 # asm 1: movdqa z5_stack=stack128#22 # asm 2: movdqa z5_stack=336(%rsp) movdqa %xmm12,336(%rsp) # qhasm: y12 = z14 # asm 1: movdqa y12=int6464#1 # asm 2: movdqa y12=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 y12 += z15 # asm 1: paddd r12=int6464#13 # asm 2: movdqa r12=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y12 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y8=int6464#1 # asm 2: movdqa y8=%xmm0 movdqa %xmm1,%xmm0 # qhasm: uint32323232 y8 += z11 # asm 1: paddd r8=int6464#13 # asm 2: movdqa r8=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y8 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y13=int6464#1 # asm 2: movdqa y13=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 y13 += z12 # asm 1: paddd r13=int6464#13 # asm 2: movdqa r13=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y13 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y9=int6464#1 # asm 2: movdqa y9=%xmm0 movdqa %xmm6,%xmm0 # qhasm: uint32323232 y9 += z8 # asm 1: paddd r9=int6464#13 # asm 2: movdqa r9=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y9 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y14=int6464#1 # asm 2: movdqa y14=%xmm0 movdqa %xmm13,%xmm0 # qhasm: uint32323232 y14 += z13 # asm 1: paddd r14=int6464#13 # asm 2: movdqa r14=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y14 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y10=int6464#1 # asm 2: movdqa y10=%xmm0 movdqa %xmm15,%xmm0 # qhasm: uint32323232 y10 += z9 # asm 1: paddd r10=int6464#13 # asm 2: movdqa r10=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y10 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,y15=int6464#1 # asm 2: movdqa y15=%xmm0 movdqa %xmm9,%xmm0 # qhasm: uint32323232 y15 += z14 # asm 1: paddd r15=int6464#13 # asm 2: movdqa r15=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y15 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z0=int6464#13 # asm 2: movdqa z0=%xmm12 movdqa 320(%rsp),%xmm12 # qhasm: z5 = z5_stack # asm 1: movdqa z5=int6464#1 # asm 2: movdqa z5=%xmm0 movdqa 336(%rsp),%xmm0 # qhasm: unsigned>? 
i -= 2 # asm 1: sub $2, ja ._mainloop1 # qhasm: uint32323232 z0 += orig0 # asm 1: paddd in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: (uint32) in0 ^= *(uint32 *) (m + 192) # asm 1: xorl 192(in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: (uint32) in4 ^= *(uint32 *) (m + 208) # asm 1: xorl 208(in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: z8 <<<= 96 # asm 1: pshufd 
$0x39,in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: (uint32) in8 ^= *(uint32 *) (m + 224) # asm 1: xorl 224(in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: (uint32) in12 ^= *(uint32 *) (m + 240) # asm 1: xorl 240(bytes=int64#6 # asm 2: movq bytes=%r9 movq 408(%rsp),%r9 # qhasm: bytes -= 256 # asm 1: sub $256,? 
bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: bytesbetween1and255: ._bytesbetween1and255: # qhasm: unsignedctarget=int64#3 # asm 2: mov ctarget=%rdx mov %rdi,%rdx # qhasm: out = &tmp # asm 1: leaq out=int64#1 # asm 2: leaq out=%rdi leaq 416(%rsp),%rdi # qhasm: i = bytes # asm 1: mov i=int64#4 # asm 2: mov i=%rcx mov %r9,%rcx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # qhasm: out = &tmp # asm 1: leaq out=int64#1 # asm 2: leaq out=%rdi leaq 416(%rsp),%rdi # qhasm: m = &tmp # asm 1: leaq m=int64#2 # asm 2: leaq m=%rsi leaq 416(%rsp),%rsi # comment:fp stack unchanged by fallthrough # qhasm: nocopy: ._nocopy: # qhasm: bytes_backup = bytes # asm 1: movq bytes_backup=stack64#8 # asm 2: movq bytes_backup=408(%rsp) movq %r9,408(%rsp) # qhasm: diag0 = x0 # asm 1: movdqa diag0=int6464#1 # asm 2: movdqa diag0=%xmm0 movdqa 48(%rsp),%xmm0 # qhasm: diag1 = x1 # asm 1: movdqa diag1=int6464#2 # asm 2: movdqa diag1=%xmm1 movdqa 0(%rsp),%xmm1 # qhasm: diag2 = x2 # asm 1: movdqa diag2=int6464#3 # asm 2: movdqa diag2=%xmm2 movdqa 16(%rsp),%xmm2 # qhasm: diag3 = x3 # asm 1: movdqa diag3=int6464#4 # asm 2: movdqa diag3=%xmm3 movdqa 32(%rsp),%xmm3 # qhasm: a0 = diag1 # asm 1: movdqa a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: i = 20 # asm 1: mov $20,>i=int64#4 # asm 2: mov $20,>i=%rcx mov $20,%rcx # qhasm: mainloop2: ._mainloop2: # qhasm: uint32323232 a0 += diag0 # asm 1: paddd a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 # asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 
# asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,? i -= 4 # asm 1: sub $4,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,b0=int6464#8,>b0=int6464#8 # asm 2: pxor >b0=%xmm7,>b0=%xmm7 pxor %xmm7,%xmm7 # qhasm: uint32323232 b7 >>= 14 # asm 1: psrld $14, ja ._mainloop2 # qhasm: uint32323232 diag0 += x0 # asm 1: paddd in0=int64#4 # asm 2: movd in0=%rcx movd %xmm0,%rcx # qhasm: in12 = diag1 # asm 1: movd in12=int64#5 # asm 2: movd in12=%r8 movd %xmm1,%r8 # qhasm: in8 = diag2 # asm 1: movd in8=int64#6 # asm 2: movd in8=%r9 movd %xmm2,%r9 # qhasm: in4 = diag3 # asm 1: movd in4=int64#7 # asm 2: movd in4=%rax movd %xmm3,%rax # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in1 = diag1 # asm 1: movd in1=int64#5 # asm 2: movd in1=%r8 movd %xmm1,%r8 # qhasm: in13 = diag2 # asm 1: movd in13=int64#6 # asm 2: movd in13=%r9 movd %xmm2,%r9 # qhasm: in9 = diag3 # asm 1: movd in9=int64#7 # asm 2: movd in9=%rax movd %xmm3,%rax # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in10=int64#4 # asm 2: movd in10=%rcx movd %xmm0,%rcx # qhasm: in6 = diag1 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm1,%r8 # qhasm: in2 = diag2 # asm 1: movd in2=int64#6 # asm 2: movd in2=%r9 movd %xmm2,%r9 # qhasm: in14 = diag3 # asm 1: movd in14=int64#7 # asm 2: movd in14=%rax movd %xmm3,%rax # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in15=int64#4 # asm 2: movd in15=%rcx movd %xmm0,%rcx # qhasm: in11 = diag1 # asm 1: movd in11=int64#5 # asm 2: movd in11=%r8 movd %xmm1,%r8 # qhasm: in7 = diag2 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm2,%r9 # qhasm: in3 = diag3 # asm 1: movd in3=int64#7 # asm 2: movd in3=%rax movd %xmm3,%rax # qhasm: (uint32) in15 ^= *(uint32 *) (m + 60) # asm 1: xorl 60(bytes=int64#6 # asm 2: movq bytes=%r9 movq 408(%rsp),%r9 # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int64#4d # asm 2: movl in8=%ecx movl 16(%rsp),%ecx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int64#5d # asm 2: movl 4+in9=%r8d movl 4+32(%rsp),%r8d # qhasm: in8 += 1 # asm 1: add $1,in9=int64#5 # asm 2: mov in9=%r8 mov %rcx,%r8 # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,x2=stack128#2 # asm 2: movl x2=16(%rsp) movl %ecx,16(%rsp) # qhasm: ((uint32 
*)&x3)[1] = in9 # asm 1: movl ? unsigned ja ._bytesatleast65 # comment:fp stack unchanged by jump # qhasm: goto bytesatleast64 if !unsigned< jae ._bytesatleast64 # qhasm: m = out # asm 1: mov m=int64#2 # asm 2: mov m=%rsi mov %rdi,%rsi # qhasm: out = ctarget # asm 1: mov out=int64#1 # asm 2: mov out=%rdi mov %rdx,%rdi # qhasm: i = bytes # asm 1: mov i=int64#4 # asm 2: mov i=%rcx mov %r9,%rcx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # comment:fp stack unchanged by fallthrough # qhasm: bytesatleast64: ._bytesatleast64: # comment:fp stack unchanged by fallthrough # qhasm: done: ._done: # qhasm: r11_caller = r11_stack # asm 1: movq r11_caller=int64#9 # asm 2: movq r11_caller=%r11 movq 352(%rsp),%r11 # qhasm: r12_caller = r12_stack # asm 1: movq r12_caller=int64#10 # asm 2: movq r12_caller=%r12 movq 360(%rsp),%r12 # qhasm: r13_caller = r13_stack # asm 1: movq r13_caller=int64#11 # asm 2: movq r13_caller=%r13 movq 368(%rsp),%r13 # qhasm: r14_caller = r14_stack # asm 1: movq r14_caller=int64#12 # asm 2: movq r14_caller=%r14 movq 376(%rsp),%r14 # qhasm: r15_caller = r15_stack # asm 1: movq r15_caller=int64#13 # asm 2: movq r15_caller=%r15 movq 384(%rsp),%r15 # qhasm: rbx_caller = rbx_stack # asm 1: movq rbx_caller=int64#14 # asm 2: movq rbx_caller=%rbx movq 392(%rsp),%rbx # qhasm: rbp_caller = rbp_stack # asm 1: movq rbp_caller=int64#15 # asm 2: movq rbp_caller=%rbp movq 400(%rsp),%rbp # qhasm: leave add %r11,%rsp xor %rax,%rax xor %rdx,%rdx ret # qhasm: bytesatleast65: ._bytesatleast65: # qhasm: bytes -= 64 # asm 1: sub $64,= 64) { crypto_core_salsa20(c,in,k,sigma); u = 1; for (i = 8;i < 16;++i) { u += (unsigned int) in[i]; in[i] = u; u >>= 8; } clen -= 64; c += 64; } if (clen) { crypto_core_salsa20(block,in,k,sigma); for (i = 0;i < clen;++i) c[i] = block[i]; } return 0; } curvedns-curvedns-0.87/nacl/crypto_stream/salsa20/ref/xor.c000066400000000000000000000016571150631715100237460ustar00rootroot00000000000000/* version 20080913 D. J. Bernstein Public domain. 
*/ #include "crypto_core_salsa20.h" #include "crypto_stream.h" typedef unsigned int uint32; static const unsigned char sigma[16] = "expand 32-byte k"; int crypto_stream_xor( unsigned char *c, const unsigned char *m,unsigned long long mlen, const unsigned char *n, const unsigned char *k ) { unsigned char in[16]; unsigned char block[64]; int i; unsigned int u; if (!mlen) return 0; for (i = 0;i < 8;++i) in[i] = n[i]; for (i = 8;i < 16;++i) in[i] = 0; while (mlen >= 64) { crypto_core_salsa20(block,in,k,sigma); for (i = 0;i < 64;++i) c[i] = m[i] ^ block[i]; u = 1; for (i = 8;i < 16;++i) { u += (unsigned int) in[i]; in[i] = u; u >>= 8; } mlen -= 64; c += 64; m += 64; } if (mlen) { crypto_core_salsa20(block,in,k,sigma); for (i = 0;i < mlen;++i) c[i] = m[i] ^ block[i]; } return 0; } curvedns-curvedns-0.87/nacl/crypto_stream/salsa20/used000066400000000000000000000000001150631715100230560ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/salsa20/x86_xmm5/000077500000000000000000000000001150631715100236005ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/salsa20/x86_xmm5/api.h000066400000000000000000000000671150631715100245250ustar00rootroot00000000000000#define CRYPTO_KEYBYTES 32 #define CRYPTO_NONCEBYTES 8 curvedns-curvedns-0.87/nacl/crypto_stream/salsa20/x86_xmm5/stream.s000066400000000000000000004265601150631715100252740ustar00rootroot00000000000000 # qhasm: int32 a # qhasm: stack32 arg1 # qhasm: stack32 arg2 # qhasm: stack32 arg3 # qhasm: stack32 arg4 # qhasm: stack32 arg5 # qhasm: stack32 arg6 # qhasm: input arg1 # qhasm: input arg2 # qhasm: input arg3 # qhasm: input arg4 # qhasm: input arg5 # qhasm: input arg6 # qhasm: int32 eax # qhasm: int32 ebx # qhasm: int32 esi # qhasm: int32 edi # qhasm: int32 ebp # qhasm: caller eax # qhasm: caller ebx # qhasm: caller esi # qhasm: caller edi # qhasm: caller ebp # qhasm: int32 k # qhasm: int32 kbits # qhasm: int32 iv # qhasm: int32 i # qhasm: stack128 x0 # qhasm: stack128 x1 # qhasm: stack128 x2 # qhasm: stack128 x3 # qhasm: int32 m # qhasm: stack32 out_stack # qhasm: int32 out # qhasm: stack32 bytes_stack # qhasm: int32 bytes # qhasm: stack32 eax_stack # qhasm: stack32 ebx_stack # qhasm: stack32 esi_stack # qhasm: stack32 edi_stack # qhasm: stack32 ebp_stack # qhasm: int6464 diag0 # qhasm: int6464 diag1 # qhasm: int6464 diag2 # qhasm: int6464 diag3 # qhasm: int6464 a0 # qhasm: int6464 a1 # qhasm: int6464 a2 # qhasm: int6464 a3 # qhasm: int6464 a4 # qhasm: int6464 a5 # qhasm: int6464 a6 # qhasm: int6464 a7 # qhasm: int6464 b0 # qhasm: int6464 b1 # qhasm: int6464 b2 # qhasm: int6464 b3 # qhasm: int6464 b4 # qhasm: int6464 b5 # qhasm: int6464 b6 # qhasm: int6464 b7 # qhasm: int6464 z0 # qhasm: int6464 z1 # qhasm: int6464 z2 # qhasm: int6464 z3 # qhasm: int6464 z4 # qhasm: int6464 z5 # qhasm: int6464 z6 # qhasm: int6464 z7 # qhasm: int6464 z8 # qhasm: int6464 z9 # qhasm: int6464 z10 # qhasm: int6464 z11 # qhasm: int6464 z12 # qhasm: int6464 z13 # qhasm: int6464 z14 # qhasm: int6464 z15 # qhasm: stack128 z0_stack # qhasm: stack128 z1_stack # qhasm: stack128 z2_stack # qhasm: stack128 z3_stack # qhasm: stack128 z4_stack # qhasm: stack128 z5_stack # qhasm: stack128 z6_stack # qhasm: stack128 z7_stack # qhasm: stack128 z8_stack # qhasm: stack128 z9_stack # qhasm: stack128 z10_stack # qhasm: stack128 z11_stack # qhasm: stack128 z12_stack # qhasm: stack128 z13_stack # qhasm: stack128 z14_stack # qhasm: stack128 z15_stack # qhasm: stack128 orig0 # qhasm: stack128 orig1 # qhasm: stack128 orig2 # qhasm: stack128 orig3 # 
qhasm: stack128 orig4 # qhasm: stack128 orig5 # qhasm: stack128 orig6 # qhasm: stack128 orig7 # qhasm: stack128 orig8 # qhasm: stack128 orig9 # qhasm: stack128 orig10 # qhasm: stack128 orig11 # qhasm: stack128 orig12 # qhasm: stack128 orig13 # qhasm: stack128 orig14 # qhasm: stack128 orig15 # qhasm: int6464 p # qhasm: int6464 q # qhasm: int6464 r # qhasm: int6464 s # qhasm: int6464 t # qhasm: int6464 u # qhasm: int6464 v # qhasm: int6464 w # qhasm: int6464 mp # qhasm: int6464 mq # qhasm: int6464 mr # qhasm: int6464 ms # qhasm: int6464 mt # qhasm: int6464 mu # qhasm: int6464 mv # qhasm: int6464 mw # qhasm: int32 in0 # qhasm: int32 in1 # qhasm: int32 in2 # qhasm: int32 in3 # qhasm: int32 in4 # qhasm: int32 in5 # qhasm: int32 in6 # qhasm: int32 in7 # qhasm: int32 in8 # qhasm: int32 in9 # qhasm: int32 in10 # qhasm: int32 in11 # qhasm: int32 in12 # qhasm: int32 in13 # qhasm: int32 in14 # qhasm: int32 in15 # qhasm: stack512 tmp # qhasm: stack32 ctarget # qhasm: enter crypto_stream_salsa20_x86_xmm5 .text .p2align 5 .globl _crypto_stream_salsa20_x86_xmm5 .globl crypto_stream_salsa20_x86_xmm5 _crypto_stream_salsa20_x86_xmm5: crypto_stream_salsa20_x86_xmm5: mov %esp,%eax and $31,%eax add $704,%eax sub %eax,%esp # qhasm: eax_stack = eax # asm 1: movl eax_stack=stack32#1 # asm 2: movl eax_stack=0(%esp) movl %eax,0(%esp) # qhasm: ebx_stack = ebx # asm 1: movl ebx_stack=stack32#2 # asm 2: movl ebx_stack=4(%esp) movl %ebx,4(%esp) # qhasm: esi_stack = esi # asm 1: movl esi_stack=stack32#3 # asm 2: movl esi_stack=8(%esp) movl %esi,8(%esp) # qhasm: edi_stack = edi # asm 1: movl edi_stack=stack32#4 # asm 2: movl edi_stack=12(%esp) movl %edi,12(%esp) # qhasm: ebp_stack = ebp # asm 1: movl ebp_stack=stack32#5 # asm 2: movl ebp_stack=16(%esp) movl %ebp,16(%esp) # qhasm: bytes = arg2 # asm 1: movl bytes=int32#3 # asm 2: movl bytes=%edx movl 8(%esp,%eax),%edx # qhasm: out = arg1 # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 4(%esp,%eax),%edi # qhasm: m = out # asm 1: mov m=int32#5 # asm 2: mov m=%esi mov %edi,%esi # qhasm: iv = arg4 # asm 1: movl iv=int32#4 # asm 2: movl iv=%ebx movl 16(%esp,%eax),%ebx # qhasm: k = arg5 # asm 1: movl k=int32#7 # asm 2: movl k=%ebp movl 20(%esp,%eax),%ebp # qhasm: unsigned>? bytes - 0 # asm 1: cmp $0, jbe ._done # qhasm: a = 0 # asm 1: mov $0,>a=int32#1 # asm 2: mov $0,>a=%eax mov $0,%eax # qhasm: i = bytes # asm 1: mov i=int32#2 # asm 2: mov i=%ecx mov %edx,%ecx # qhasm: while (i) { *out++ = a; --i } rep stosb # qhasm: out -= bytes # asm 1: subl eax_stack=stack32#1 # asm 2: movl eax_stack=0(%esp) movl %eax,0(%esp) # qhasm: ebx_stack = ebx # asm 1: movl ebx_stack=stack32#2 # asm 2: movl ebx_stack=4(%esp) movl %ebx,4(%esp) # qhasm: esi_stack = esi # asm 1: movl esi_stack=stack32#3 # asm 2: movl esi_stack=8(%esp) movl %esi,8(%esp) # qhasm: edi_stack = edi # asm 1: movl edi_stack=stack32#4 # asm 2: movl edi_stack=12(%esp) movl %edi,12(%esp) # qhasm: ebp_stack = ebp # asm 1: movl ebp_stack=stack32#5 # asm 2: movl ebp_stack=16(%esp) movl %ebp,16(%esp) # qhasm: out = arg1 # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 4(%esp,%eax),%edi # qhasm: m = arg2 # asm 1: movl m=int32#5 # asm 2: movl m=%esi movl 8(%esp,%eax),%esi # qhasm: bytes = arg3 # asm 1: movl bytes=int32#3 # asm 2: movl bytes=%edx movl 12(%esp,%eax),%edx # qhasm: iv = arg5 # asm 1: movl iv=int32#4 # asm 2: movl iv=%ebx movl 20(%esp,%eax),%ebx # qhasm: k = arg6 # asm 1: movl k=int32#7 # asm 2: movl k=%ebp movl 24(%esp,%eax),%ebp # qhasm: unsigned>? 
bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: start: ._start: # qhasm: out_stack = out # asm 1: movl out_stack=stack32#6 # asm 2: movl out_stack=20(%esp) movl %edi,20(%esp) # qhasm: bytes_stack = bytes # asm 1: movl bytes_stack=stack32#7 # asm 2: movl bytes_stack=24(%esp) movl %edx,24(%esp) # qhasm: in4 = *(uint32 *) (k + 12) # asm 1: movl 12(in4=int32#1 # asm 2: movl 12(in4=%eax movl 12(%ebp),%eax # qhasm: in12 = *(uint32 *) (k + 20) # asm 1: movl 20(in12=int32#2 # asm 2: movl 20(in12=%ecx movl 20(%ebp),%ecx # qhasm: ((uint32 *)&x3)[0] = in4 # asm 1: movl x3=stack128#1 # asm 2: movl x3=32(%esp) movl %eax,32(%esp) # qhasm: ((uint32 *)&x1)[0] = in12 # asm 1: movl x1=stack128#2 # asm 2: movl x1=48(%esp) movl %ecx,48(%esp) # qhasm: in0 = 1634760805 # asm 1: mov $1634760805,>in0=int32#1 # asm 2: mov $1634760805,>in0=%eax mov $1634760805,%eax # qhasm: in8 = 0 # asm 1: mov $0,>in8=int32#2 # asm 2: mov $0,>in8=%ecx mov $0,%ecx # qhasm: ((uint32 *)&x0)[0] = in0 # asm 1: movl x0=stack128#3 # asm 2: movl x0=64(%esp) movl %eax,64(%esp) # qhasm: ((uint32 *)&x2)[0] = in8 # asm 1: movl x2=stack128#4 # asm 2: movl x2=80(%esp) movl %ecx,80(%esp) # qhasm: in6 = *(uint32 *) (iv + 0) # asm 1: movl 0(in6=int32#1 # asm 2: movl 0(in6=%eax movl 0(%ebx),%eax # qhasm: in7 = *(uint32 *) (iv + 4) # asm 1: movl 4(in7=int32#2 # asm 2: movl 4(in7=%ecx movl 4(%ebx),%ecx # qhasm: ((uint32 *)&x1)[2] = in6 # asm 1: movl in9=int32#1 # asm 2: mov $0,>in9=%eax mov $0,%eax # qhasm: in10 = 2036477234 # asm 1: mov $2036477234,>in10=int32#2 # asm 2: mov $2036477234,>in10=%ecx mov $2036477234,%ecx # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl in1=int32#1 # asm 2: movl 0(in1=%eax movl 0(%ebp),%eax # qhasm: in2 = *(uint32 *) (k + 4) # asm 1: movl 4(in2=int32#2 # asm 2: movl 4(in2=%ecx movl 4(%ebp),%ecx # qhasm: in3 = *(uint32 *) (k + 8) # asm 1: movl 8(in3=int32#3 # asm 2: movl 8(in3=%edx movl 8(%ebp),%edx # qhasm: in5 = 857760878 # asm 1: mov $857760878,>in5=int32#4 # asm 2: mov $857760878,>in5=%ebx mov $857760878,%ebx # qhasm: ((uint32 *)&x1)[1] = in1 # asm 1: movl in11=int32#1 # asm 2: movl 16(in11=%eax movl 16(%ebp),%eax # qhasm: in13 = *(uint32 *) (k + 24) # asm 1: movl 24(in13=int32#2 # asm 2: movl 24(in13=%ecx movl 24(%ebp),%ecx # qhasm: in14 = *(uint32 *) (k + 28) # asm 1: movl 28(in14=int32#3 # asm 2: movl 28(in14=%edx movl 28(%ebp),%edx # qhasm: in15 = 1797285236 # asm 1: mov $1797285236,>in15=int32#4 # asm 2: mov $1797285236,>in15=%ebx mov $1797285236,%ebx # qhasm: ((uint32 *)&x1)[3] = in11 # asm 1: movl bytes=int32#1 # asm 2: movl bytes=%eax movl 24(%esp),%eax # qhasm: unsignedz0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 64(%esp),%xmm0 # qhasm: z5 = z0[1,1,1,1] # asm 1: pshufd $0x55,z5=int6464#2 # asm 2: pshufd $0x55,z5=%xmm1 pshufd $0x55,%xmm0,%xmm1 # qhasm: z10 = z0[2,2,2,2] # asm 1: pshufd $0xaa,z10=int6464#3 # asm 2: pshufd $0xaa,z10=%xmm2 pshufd $0xaa,%xmm0,%xmm2 # qhasm: z15 = z0[3,3,3,3] # asm 1: pshufd $0xff,z15=int6464#4 # asm 2: pshufd $0xff,z15=%xmm3 pshufd $0xff,%xmm0,%xmm3 # qhasm: z0 = z0[0,0,0,0] # asm 1: pshufd $0x00,z0=int6464#1 # asm 2: pshufd $0x00,z0=%xmm0 pshufd $0x00,%xmm0,%xmm0 # qhasm: orig5 = z5 # asm 1: movdqa orig5=stack128#5 # asm 2: movdqa orig5=96(%esp) movdqa %xmm1,96(%esp) # qhasm: orig10 = z10 # asm 1: movdqa orig10=stack128#6 # asm 2: movdqa orig10=112(%esp) movdqa %xmm2,112(%esp) # qhasm: orig15 = z15 # asm 1: movdqa orig15=stack128#7 # asm 2: movdqa orig15=128(%esp) movdqa %xmm3,128(%esp) # qhasm: orig0 = z0 # asm 1: movdqa 
orig0=stack128#8 # asm 2: movdqa orig0=144(%esp) movdqa %xmm0,144(%esp) # qhasm: z1 = x1 # asm 1: movdqa z1=int6464#1 # asm 2: movdqa z1=%xmm0 movdqa 48(%esp),%xmm0 # qhasm: z6 = z1[2,2,2,2] # asm 1: pshufd $0xaa,z6=int6464#2 # asm 2: pshufd $0xaa,z6=%xmm1 pshufd $0xaa,%xmm0,%xmm1 # qhasm: z11 = z1[3,3,3,3] # asm 1: pshufd $0xff,z11=int6464#3 # asm 2: pshufd $0xff,z11=%xmm2 pshufd $0xff,%xmm0,%xmm2 # qhasm: z12 = z1[0,0,0,0] # asm 1: pshufd $0x00,z12=int6464#4 # asm 2: pshufd $0x00,z12=%xmm3 pshufd $0x00,%xmm0,%xmm3 # qhasm: z1 = z1[1,1,1,1] # asm 1: pshufd $0x55,z1=int6464#1 # asm 2: pshufd $0x55,z1=%xmm0 pshufd $0x55,%xmm0,%xmm0 # qhasm: orig6 = z6 # asm 1: movdqa orig6=stack128#9 # asm 2: movdqa orig6=160(%esp) movdqa %xmm1,160(%esp) # qhasm: orig11 = z11 # asm 1: movdqa orig11=stack128#10 # asm 2: movdqa orig11=176(%esp) movdqa %xmm2,176(%esp) # qhasm: orig12 = z12 # asm 1: movdqa orig12=stack128#11 # asm 2: movdqa orig12=192(%esp) movdqa %xmm3,192(%esp) # qhasm: orig1 = z1 # asm 1: movdqa orig1=stack128#12 # asm 2: movdqa orig1=208(%esp) movdqa %xmm0,208(%esp) # qhasm: z2 = x2 # asm 1: movdqa z2=int6464#1 # asm 2: movdqa z2=%xmm0 movdqa 80(%esp),%xmm0 # qhasm: z7 = z2[3,3,3,3] # asm 1: pshufd $0xff,z7=int6464#2 # asm 2: pshufd $0xff,z7=%xmm1 pshufd $0xff,%xmm0,%xmm1 # qhasm: z13 = z2[1,1,1,1] # asm 1: pshufd $0x55,z13=int6464#3 # asm 2: pshufd $0x55,z13=%xmm2 pshufd $0x55,%xmm0,%xmm2 # qhasm: z2 = z2[2,2,2,2] # asm 1: pshufd $0xaa,z2=int6464#1 # asm 2: pshufd $0xaa,z2=%xmm0 pshufd $0xaa,%xmm0,%xmm0 # qhasm: orig7 = z7 # asm 1: movdqa orig7=stack128#13 # asm 2: movdqa orig7=224(%esp) movdqa %xmm1,224(%esp) # qhasm: orig13 = z13 # asm 1: movdqa orig13=stack128#14 # asm 2: movdqa orig13=240(%esp) movdqa %xmm2,240(%esp) # qhasm: orig2 = z2 # asm 1: movdqa orig2=stack128#15 # asm 2: movdqa orig2=256(%esp) movdqa %xmm0,256(%esp) # qhasm: z3 = x3 # asm 1: movdqa z3=int6464#1 # asm 2: movdqa z3=%xmm0 movdqa 32(%esp),%xmm0 # qhasm: z4 = z3[0,0,0,0] # asm 1: pshufd $0x00,z4=int6464#2 # asm 2: pshufd $0x00,z4=%xmm1 pshufd $0x00,%xmm0,%xmm1 # qhasm: z14 = z3[2,2,2,2] # asm 1: pshufd $0xaa,z14=int6464#3 # asm 2: pshufd $0xaa,z14=%xmm2 pshufd $0xaa,%xmm0,%xmm2 # qhasm: z3 = z3[3,3,3,3] # asm 1: pshufd $0xff,z3=int6464#1 # asm 2: pshufd $0xff,z3=%xmm0 pshufd $0xff,%xmm0,%xmm0 # qhasm: orig4 = z4 # asm 1: movdqa orig4=stack128#16 # asm 2: movdqa orig4=272(%esp) movdqa %xmm1,272(%esp) # qhasm: orig14 = z14 # asm 1: movdqa orig14=stack128#17 # asm 2: movdqa orig14=288(%esp) movdqa %xmm2,288(%esp) # qhasm: orig3 = z3 # asm 1: movdqa orig3=stack128#18 # asm 2: movdqa orig3=304(%esp) movdqa %xmm0,304(%esp) # qhasm: bytesatleast256: ._bytesatleast256: # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int32#2 # asm 2: movl in8=%ecx movl 80(%esp),%ecx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int32#3 # asm 2: movl 4+in9=%edx movl 4+32(%esp),%edx # qhasm: ((uint32 *) &orig8)[0] = in8 # asm 1: movl orig8=stack128#19 # asm 2: movl orig8=320(%esp) movl %ecx,320(%esp) # qhasm: ((uint32 *) &orig9)[0] = in9 # asm 1: movl orig9=stack128#20 # asm 2: movl orig9=336(%esp) movl %edx,336(%esp) # qhasm: carry? 
in8 += 1 # asm 1: add $1,x2=stack128#4 # asm 2: movl x2=80(%esp) movl %ecx,80(%esp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl bytes_stack=stack32#7 # asm 2: movl bytes_stack=24(%esp) movl %eax,24(%esp) # qhasm: i = 20 # asm 1: mov $20,>i=int32#1 # asm 2: mov $20,>i=%eax mov $20,%eax # qhasm: z5 = orig5 # asm 1: movdqa z5=int6464#1 # asm 2: movdqa z5=%xmm0 movdqa 96(%esp),%xmm0 # qhasm: z10 = orig10 # asm 1: movdqa z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 112(%esp),%xmm1 # qhasm: z15 = orig15 # asm 1: movdqa z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 128(%esp),%xmm2 # qhasm: z14 = orig14 # asm 1: movdqa z14=int6464#4 # asm 2: movdqa z14=%xmm3 movdqa 288(%esp),%xmm3 # qhasm: z3 = orig3 # asm 1: movdqa z3=int6464#5 # asm 2: movdqa z3=%xmm4 movdqa 304(%esp),%xmm4 # qhasm: z6 = orig6 # asm 1: movdqa z6=int6464#6 # asm 2: movdqa z6=%xmm5 movdqa 160(%esp),%xmm5 # qhasm: z11 = orig11 # asm 1: movdqa z11=int6464#7 # asm 2: movdqa z11=%xmm6 movdqa 176(%esp),%xmm6 # qhasm: z1 = orig1 # asm 1: movdqa z1=int6464#8 # asm 2: movdqa z1=%xmm7 movdqa 208(%esp),%xmm7 # qhasm: z5_stack = z5 # asm 1: movdqa z5_stack=stack128#21 # asm 2: movdqa z5_stack=352(%esp) movdqa %xmm0,352(%esp) # qhasm: z10_stack = z10 # asm 1: movdqa z10_stack=stack128#22 # asm 2: movdqa z10_stack=368(%esp) movdqa %xmm1,368(%esp) # qhasm: z15_stack = z15 # asm 1: movdqa z15_stack=stack128#23 # asm 2: movdqa z15_stack=384(%esp) movdqa %xmm2,384(%esp) # qhasm: z14_stack = z14 # asm 1: movdqa z14_stack=stack128#24 # asm 2: movdqa z14_stack=400(%esp) movdqa %xmm3,400(%esp) # qhasm: z3_stack = z3 # asm 1: movdqa z3_stack=stack128#25 # asm 2: movdqa z3_stack=416(%esp) movdqa %xmm4,416(%esp) # qhasm: z6_stack = z6 # asm 1: movdqa z6_stack=stack128#26 # asm 2: movdqa z6_stack=432(%esp) movdqa %xmm5,432(%esp) # qhasm: z11_stack = z11 # asm 1: movdqa z11_stack=stack128#27 # asm 2: movdqa z11_stack=448(%esp) movdqa %xmm6,448(%esp) # qhasm: z1_stack = z1 # asm 1: movdqa z1_stack=stack128#28 # asm 2: movdqa z1_stack=464(%esp) movdqa %xmm7,464(%esp) # qhasm: z7 = orig7 # asm 1: movdqa z7=int6464#5 # asm 2: movdqa z7=%xmm4 movdqa 224(%esp),%xmm4 # qhasm: z13 = orig13 # asm 1: movdqa z13=int6464#6 # asm 2: movdqa z13=%xmm5 movdqa 240(%esp),%xmm5 # qhasm: z2 = orig2 # asm 1: movdqa z2=int6464#7 # asm 2: movdqa z2=%xmm6 movdqa 256(%esp),%xmm6 # qhasm: z9 = orig9 # asm 1: movdqa z9=int6464#8 # asm 2: movdqa z9=%xmm7 movdqa 336(%esp),%xmm7 # qhasm: p = orig0 # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 144(%esp),%xmm0 # qhasm: t = orig12 # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 192(%esp),%xmm2 # qhasm: q = orig4 # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 272(%esp),%xmm3 # qhasm: r = orig8 # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 320(%esp),%xmm1 # qhasm: z7_stack = z7 # asm 1: movdqa z7_stack=stack128#29 # asm 2: movdqa z7_stack=480(%esp) movdqa %xmm4,480(%esp) # qhasm: z13_stack = z13 # asm 1: movdqa z13_stack=stack128#30 # asm 2: movdqa z13_stack=496(%esp) movdqa %xmm5,496(%esp) # qhasm: z2_stack = z2 # asm 1: movdqa z2_stack=stack128#31 # asm 2: movdqa z2_stack=512(%esp) movdqa %xmm6,512(%esp) # qhasm: z9_stack = z9 # asm 1: movdqa z9_stack=stack128#32 # asm 2: movdqa z9_stack=528(%esp) movdqa %xmm7,528(%esp) # qhasm: z0_stack = p # asm 1: movdqa z0_stack=stack128#33 # asm 2: movdqa z0_stack=544(%esp) movdqa %xmm0,544(%esp) # qhasm: z12_stack = t # asm 1: movdqa z12_stack=stack128#34 # asm 2: movdqa z12_stack=560(%esp) movdqa %xmm2,560(%esp) # qhasm: z4_stack = q # asm 1: 
movdqa z4_stack=stack128#35 # asm 2: movdqa z4_stack=576(%esp) movdqa %xmm3,576(%esp) # qhasm: z8_stack = r # asm 1: movdqa z8_stack=stack128#36 # asm 2: movdqa z8_stack=592(%esp) movdqa %xmm1,592(%esp) # qhasm: mainloop1: ._mainloop1: # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s = t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z4_stack=stack128#33 # asm 2: movdqa z4_stack=544(%esp) movdqa %xmm3,544(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z8_stack=stack128#34 # asm 2: movdqa z8_stack=560(%esp) movdqa %xmm1,560(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 464(%esp),%xmm2 # qhasm: mp = z5_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 352(%esp),%xmm4 # qhasm: mq = z9_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 528(%esp),%xmm3 # qhasm: mr = z13_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 496(%esp),%xmm5 # qhasm: z12_stack = s # asm 1: movdqa z12_stack=stack128#30 # asm 2: movdqa z12_stack=496(%esp) movdqa %xmm6,496(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z0_stack=stack128#21 # asm 2: movdqa z0_stack=352(%esp) movdqa %xmm0,352(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z9_stack=stack128#32 # asm 2: movdqa z9_stack=528(%esp) movdqa %xmm3,528(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z13_stack=stack128#35 # asm 2: movdqa z13_stack=576(%esp) movdqa %xmm5,576(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 432(%esp),%xmm2 # qhasm: p = z10_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 368(%esp),%xmm0 # qhasm: q = z14_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 400(%esp),%xmm3 # qhasm: r = z2_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 512(%esp),%xmm1 # qhasm: z1_stack = ms # asm 1: movdqa z1_stack=stack128#22 # asm 2: movdqa z1_stack=368(%esp) movdqa %xmm6,368(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z5_stack=stack128#24 # asm 2: movdqa z5_stack=400(%esp) movdqa %xmm4,400(%esp) # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s 
= t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z14_stack=stack128#36 # asm 2: movdqa z14_stack=592(%esp) movdqa %xmm3,592(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z2_stack=stack128#26 # asm 2: movdqa z2_stack=432(%esp) movdqa %xmm1,432(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 448(%esp),%xmm2 # qhasm: mp = z15_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 384(%esp),%xmm4 # qhasm: mq = z3_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 416(%esp),%xmm3 # qhasm: mr = z7_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 480(%esp),%xmm5 # qhasm: z6_stack = s # asm 1: movdqa z6_stack=stack128#23 # asm 2: movdqa z6_stack=384(%esp) movdqa %xmm6,384(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z10_stack=stack128#27 # asm 2: movdqa z10_stack=448(%esp) movdqa %xmm0,448(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z3_stack=stack128#25 # asm 2: movdqa z3_stack=416(%esp) movdqa %xmm3,416(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z7_stack=stack128#29 # asm 2: movdqa z7_stack=480(%esp) movdqa %xmm5,480(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 416(%esp),%xmm2 # qhasm: p = z0_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 352(%esp),%xmm0 # qhasm: q = z1_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 368(%esp),%xmm3 # qhasm: r = z2_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 432(%esp),%xmm1 # qhasm: z11_stack = ms # asm 1: movdqa z11_stack=stack128#21 # asm 2: movdqa z11_stack=352(%esp) movdqa %xmm6,352(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z15_stack=stack128#22 # asm 2: movdqa z15_stack=368(%esp) movdqa %xmm4,368(%esp) # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s = t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z1_stack=stack128#28 # asm 2: movdqa z1_stack=464(%esp) movdqa %xmm3,464(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # 
asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z2_stack=stack128#31 # asm 2: movdqa z2_stack=512(%esp) movdqa %xmm1,512(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 544(%esp),%xmm2 # qhasm: mp = z5_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 400(%esp),%xmm4 # qhasm: mq = z6_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 384(%esp),%xmm3 # qhasm: mr = z7_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 480(%esp),%xmm5 # qhasm: z3_stack = s # asm 1: movdqa z3_stack=stack128#25 # asm 2: movdqa z3_stack=416(%esp) movdqa %xmm6,416(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z0_stack=stack128#33 # asm 2: movdqa z0_stack=544(%esp) movdqa %xmm0,544(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z6_stack=stack128#26 # asm 2: movdqa z6_stack=432(%esp) movdqa %xmm3,432(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z7_stack=stack128#29 # asm 2: movdqa z7_stack=480(%esp) movdqa %xmm5,480(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 528(%esp),%xmm2 # qhasm: p = z10_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 448(%esp),%xmm0 # qhasm: q = z11_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 352(%esp),%xmm3 # qhasm: r = z8_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 560(%esp),%xmm1 # qhasm: z4_stack = ms # asm 1: movdqa z4_stack=stack128#34 # asm 2: movdqa z4_stack=560(%esp) movdqa %xmm6,560(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z5_stack=stack128#21 # asm 2: movdqa z5_stack=352(%esp) movdqa %xmm4,352(%esp) # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s = t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z11_stack=stack128#27 # asm 2: movdqa z11_stack=448(%esp) movdqa %xmm3,448(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z8_stack=stack128#37 # asm 2: movdqa z8_stack=608(%esp) movdqa %xmm1,608(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 
movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 592(%esp),%xmm2 # qhasm: mp = z15_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 368(%esp),%xmm4 # qhasm: mq = z12_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 496(%esp),%xmm3 # qhasm: mr = z13_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 576(%esp),%xmm5 # qhasm: z9_stack = s # asm 1: movdqa z9_stack=stack128#32 # asm 2: movdqa z9_stack=528(%esp) movdqa %xmm6,528(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z10_stack=stack128#22 # asm 2: movdqa z10_stack=368(%esp) movdqa %xmm0,368(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z12_stack=stack128#35 # asm 2: movdqa z12_stack=576(%esp) movdqa %xmm3,576(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z13_stack=stack128#30 # asm 2: movdqa z13_stack=496(%esp) movdqa %xmm5,496(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 576(%esp),%xmm2 # qhasm: p = z0_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 544(%esp),%xmm0 # qhasm: q = z4_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 560(%esp),%xmm3 # qhasm: r = z8_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 608(%esp),%xmm1 # qhasm: z14_stack = ms # asm 1: movdqa z14_stack=stack128#24 # asm 2: movdqa z14_stack=400(%esp) movdqa %xmm6,400(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z15_stack=stack128#23 # asm 2: movdqa z15_stack=384(%esp) movdqa %xmm4,384(%esp) # qhasm: unsigned>? 
i -= 2 # asm 1: sub $2, ja ._mainloop1 # qhasm: out = out_stack # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 20(%esp),%edi # qhasm: z0 = z0_stack # asm 1: movdqa z0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 544(%esp),%xmm0 # qhasm: z1 = z1_stack # asm 1: movdqa z1=int6464#2 # asm 2: movdqa z1=%xmm1 movdqa 464(%esp),%xmm1 # qhasm: z2 = z2_stack # asm 1: movdqa z2=int6464#3 # asm 2: movdqa z2=%xmm2 movdqa 512(%esp),%xmm2 # qhasm: z3 = z3_stack # asm 1: movdqa z3=int6464#4 # asm 2: movdqa z3=%xmm3 movdqa 416(%esp),%xmm3 # qhasm: uint32323232 z0 += orig0 # asm 1: paddd in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: in0 ^= *(uint32 *) (m + 192) # asm 1: xorl 192(z4=int6464#1 # asm 2: movdqa z4=%xmm0 movdqa 560(%esp),%xmm0 # qhasm: z5 = z5_stack # asm 1: movdqa z5=int6464#2 # asm 2: movdqa z5=%xmm1 movdqa 352(%esp),%xmm1 # qhasm: z6 = z6_stack # asm 1: movdqa z6=int6464#3 # asm 2: movdqa z6=%xmm2 movdqa 432(%esp),%xmm2 # qhasm: z7 = z7_stack # asm 1: movdqa z7=int6464#4 # asm 2: movdqa z7=%xmm3 movdqa 480(%esp),%xmm3 # qhasm: uint32323232 z4 += orig4 # asm 1: paddd in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # 
asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: in4 ^= *(uint32 *) (m + 208) # asm 1: xorl 208(z8=int6464#1 # asm 2: movdqa z8=%xmm0 movdqa 608(%esp),%xmm0 # qhasm: z9 = z9_stack # asm 1: movdqa z9=int6464#2 # asm 2: movdqa z9=%xmm1 movdqa 528(%esp),%xmm1 # qhasm: z10 = z10_stack # asm 1: movdqa z10=int6464#3 # asm 2: movdqa z10=%xmm2 movdqa 368(%esp),%xmm2 # qhasm: z11 = z11_stack # asm 1: movdqa z11=int6464#4 # asm 2: movdqa z11=%xmm3 movdqa 448(%esp),%xmm3 # qhasm: uint32323232 z8 += orig8 # asm 1: paddd in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: in8 ^= *(uint32 *) (m + 224) # asm 1: xorl 224(z12=int6464#1 # asm 2: movdqa z12=%xmm0 movdqa 576(%esp),%xmm0 # qhasm: z13 = z13_stack # asm 1: movdqa z13=int6464#2 # asm 2: movdqa z13=%xmm1 movdqa 496(%esp),%xmm1 # qhasm: z14 = z14_stack # asm 1: movdqa z14=int6464#3 # asm 2: movdqa z14=%xmm2 movdqa 400(%esp),%xmm2 # qhasm: z15 = z15_stack # asm 1: movdqa z15=int6464#4 # asm 2: movdqa z15=%xmm3 movdqa 384(%esp),%xmm3 # qhasm: uint32323232 z12 += orig12 # asm 1: paddd in12=int32#1 # asm 2: movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int32#1 # asm 2: movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int32#1 # asm 2: movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int32#1 # asm 2: 
movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: in12 ^= *(uint32 *) (m + 240) # asm 1: xorl 240(bytes=int32#1 # asm 2: movl bytes=%eax movl 24(%esp),%eax # qhasm: bytes -= 256 # asm 1: sub $256,out_stack=stack32#6 # asm 2: movl out_stack=20(%esp) movl %edi,20(%esp) # qhasm: unsigned? bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: bytesbetween1and255: ._bytesbetween1and255: # qhasm: unsignedctarget=stack32#6 # asm 2: movl ctarget=20(%esp) movl %edi,20(%esp) # qhasm: out = &tmp # asm 1: leal out=int32#6 # asm 2: leal out=%edi leal 640(%esp),%edi # qhasm: i = bytes # asm 1: mov i=int32#2 # asm 2: mov i=%ecx mov %eax,%ecx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # qhasm: out = &tmp # asm 1: leal out=int32#6 # asm 2: leal out=%edi leal 640(%esp),%edi # qhasm: m = &tmp # asm 1: leal m=int32#5 # asm 2: leal m=%esi leal 640(%esp),%esi # comment:fp stack unchanged by fallthrough # qhasm: nocopy: ._nocopy: # qhasm: bytes_stack = bytes # asm 1: movl bytes_stack=stack32#7 # asm 2: movl bytes_stack=24(%esp) movl %eax,24(%esp) # qhasm: diag0 = x0 # asm 1: movdqa diag0=int6464#1 # asm 2: movdqa diag0=%xmm0 movdqa 64(%esp),%xmm0 # qhasm: diag1 = x1 # asm 1: movdqa diag1=int6464#2 # asm 2: movdqa diag1=%xmm1 movdqa 48(%esp),%xmm1 # qhasm: diag2 = x2 # asm 1: movdqa diag2=int6464#3 # asm 2: movdqa diag2=%xmm2 movdqa 80(%esp),%xmm2 # qhasm: diag3 = x3 # asm 1: movdqa diag3=int6464#4 # asm 2: movdqa diag3=%xmm3 movdqa 32(%esp),%xmm3 # qhasm: a0 = diag1 # asm 1: movdqa a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: i = 20 # asm 1: mov $20,>i=int32#1 # asm 2: mov $20,>i=%eax mov $20,%eax # qhasm: mainloop2: ._mainloop2: # qhasm: uint32323232 a0 += diag0 # asm 1: paddd a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 # asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld 
$19,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 # asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,? 
i -= 4 # asm 1: sub $4,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,b0=int6464#8,>b0=int6464#8 # asm 2: pxor >b0=%xmm7,>b0=%xmm7 pxor %xmm7,%xmm7 # qhasm: uint32323232 b7 >>= 14 # asm 1: psrld $14, ja ._mainloop2 # qhasm: uint32323232 diag0 += x0 # asm 1: paddd in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in12 = diag1 # asm 1: movd in12=int32#2 # asm 2: movd in12=%ecx movd %xmm1,%ecx # qhasm: in8 = diag2 # asm 1: movd in8=int32#3 # asm 2: movd in8=%edx movd %xmm2,%edx # qhasm: in4 = diag3 # asm 1: movd in4=int32#4 # asm 2: movd in4=%ebx movd %xmm3,%ebx # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in5=int32#1 # asm 2: movd in5=%eax movd %xmm0,%eax # qhasm: in1 = diag1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in13 = diag2 # asm 1: movd in13=int32#3 # asm 2: movd in13=%edx movd %xmm2,%edx # qhasm: in9 = diag3 # asm 1: movd in9=int32#4 # asm 2: movd in9=%ebx movd %xmm3,%ebx # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in10=int32#1 # asm 2: movd in10=%eax movd %xmm0,%eax # qhasm: in6 = diag1 # asm 1: movd in6=int32#2 # asm 2: movd in6=%ecx movd %xmm1,%ecx # qhasm: in2 = diag2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in14 = diag3 # asm 1: movd in14=int32#4 # asm 2: movd in14=%ebx movd %xmm3,%ebx # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in15=int32#1 # asm 2: movd in15=%eax movd %xmm0,%eax # qhasm: in11 = diag1 # asm 1: movd in11=int32#2 # asm 2: movd in11=%ecx movd %xmm1,%ecx # qhasm: in7 = diag2 # asm 1: movd in7=int32#3 # asm 2: movd in7=%edx movd %xmm2,%edx # qhasm: in3 = diag3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: in15 ^= *(uint32 *) (m + 60) # asm 1: xorl 60(bytes=int32#1 # asm 2: movl bytes=%eax movl 24(%esp),%eax # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int32#2 # asm 2: movl in8=%ecx movl 80(%esp),%ecx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int32#3 # asm 2: movl 4+in9=%edx movl 4+32(%esp),%edx # qhasm: carry? in8 += 1 # asm 1: add $1,x2=stack128#4 # asm 2: movl x2=80(%esp) movl %ecx,80(%esp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl ? 
unsigned ja ._bytesatleast65 # comment:fp stack unchanged by jump # qhasm: goto bytesatleast64 if !unsigned< jae ._bytesatleast64 # qhasm: m = out # asm 1: mov m=int32#5 # asm 2: mov m=%esi mov %edi,%esi # qhasm: out = ctarget # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 20(%esp),%edi # qhasm: i = bytes # asm 1: mov i=int32#2 # asm 2: mov i=%ecx mov %eax,%ecx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # comment:fp stack unchanged by fallthrough # qhasm: bytesatleast64: ._bytesatleast64: # comment:fp stack unchanged by fallthrough # qhasm: done: ._done: # qhasm: eax = eax_stack # asm 1: movl eax=int32#1 # asm 2: movl eax=%eax movl 0(%esp),%eax # qhasm: ebx = ebx_stack # asm 1: movl ebx=int32#4 # asm 2: movl ebx=%ebx movl 4(%esp),%ebx # qhasm: esi = esi_stack # asm 1: movl esi=int32#5 # asm 2: movl esi=%esi movl 8(%esp),%esi # qhasm: edi = edi_stack # asm 1: movl edi=int32#6 # asm 2: movl edi=%edi movl 12(%esp),%edi # qhasm: ebp = ebp_stack # asm 1: movl ebp=int32#7 # asm 2: movl ebp=%ebp movl 16(%esp),%ebp # qhasm: leave add %eax,%esp xor %eax,%eax ret # qhasm: bytesatleast65: ._bytesatleast65: # qhasm: bytes -= 64 # asm 1: sub $64,r11_stack=stack64#1 # asm 2: movq r11_stack=352(%rsp) movq %r11,352(%rsp) # qhasm: r12_stack = r12_caller # asm 1: movq r12_stack=stack64#2 # asm 2: movq r12_stack=360(%rsp) movq %r12,360(%rsp) # qhasm: r13_stack = r13_caller # asm 1: movq r13_stack=stack64#3 # asm 2: movq r13_stack=368(%rsp) movq %r13,368(%rsp) # qhasm: r14_stack = r14_caller # asm 1: movq r14_stack=stack64#4 # asm 2: movq r14_stack=376(%rsp) movq %r14,376(%rsp) # qhasm: r15_stack = r15_caller # asm 1: movq r15_stack=stack64#5 # asm 2: movq r15_stack=384(%rsp) movq %r15,384(%rsp) # qhasm: rbx_stack = rbx_caller # asm 1: movq rbx_stack=stack64#6 # asm 2: movq rbx_stack=392(%rsp) movq %rbx,392(%rsp) # qhasm: rbp_stack = rbp_caller # asm 1: movq rbp_stack=stack64#7 # asm 2: movq rbp_stack=400(%rsp) movq %rbp,400(%rsp) # qhasm: bytes = arg2 # asm 1: mov bytes=int64#6 # asm 2: mov bytes=%r9 mov %rsi,%r9 # qhasm: out = arg1 # asm 1: mov out=int64#1 # asm 2: mov out=%rdi mov %rdi,%rdi # qhasm: m = out # asm 1: mov m=int64#2 # asm 2: mov m=%rsi mov %rdi,%rsi # qhasm: iv = arg3 # asm 1: mov iv=int64#3 # asm 2: mov iv=%rdx mov %rdx,%rdx # qhasm: k = arg4 # asm 1: mov k=int64#8 # asm 2: mov k=%r10 mov %rcx,%r10 # qhasm: unsigned>? 
bytes - 0 # asm 1: cmp $0, jbe ._done # qhasm: a = 0 # asm 1: mov $0,>a=int64#7 # asm 2: mov $0,>a=%rax mov $0,%rax # qhasm: i = bytes # asm 1: mov i=int64#4 # asm 2: mov i=%rcx mov %r9,%rcx # qhasm: while (i) { *out++ = a; --i } rep stosb # qhasm: out -= bytes # asm 1: sub r11_stack=stack64#1 # asm 2: movq r11_stack=352(%rsp) movq %r11,352(%rsp) # qhasm: r12_stack = r12_caller # asm 1: movq r12_stack=stack64#2 # asm 2: movq r12_stack=360(%rsp) movq %r12,360(%rsp) # qhasm: r13_stack = r13_caller # asm 1: movq r13_stack=stack64#3 # asm 2: movq r13_stack=368(%rsp) movq %r13,368(%rsp) # qhasm: r14_stack = r14_caller # asm 1: movq r14_stack=stack64#4 # asm 2: movq r14_stack=376(%rsp) movq %r14,376(%rsp) # qhasm: r15_stack = r15_caller # asm 1: movq r15_stack=stack64#5 # asm 2: movq r15_stack=384(%rsp) movq %r15,384(%rsp) # qhasm: rbx_stack = rbx_caller # asm 1: movq rbx_stack=stack64#6 # asm 2: movq rbx_stack=392(%rsp) movq %rbx,392(%rsp) # qhasm: rbp_stack = rbp_caller # asm 1: movq rbp_stack=stack64#7 # asm 2: movq rbp_stack=400(%rsp) movq %rbp,400(%rsp) # qhasm: out = arg1 # asm 1: mov out=int64#1 # asm 2: mov out=%rdi mov %rdi,%rdi # qhasm: m = arg2 # asm 1: mov m=int64#2 # asm 2: mov m=%rsi mov %rsi,%rsi # qhasm: bytes = arg3 # asm 1: mov bytes=int64#6 # asm 2: mov bytes=%r9 mov %rdx,%r9 # qhasm: iv = arg4 # asm 1: mov iv=int64#3 # asm 2: mov iv=%rdx mov %rcx,%rdx # qhasm: k = arg5 # asm 1: mov k=int64#8 # asm 2: mov k=%r10 mov %r8,%r10 # qhasm: unsigned>? bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: start: ._start: # qhasm: in12 = *(uint32 *) (k + 20) # asm 1: movl 20(in12=int64#4d # asm 2: movl 20(in12=%ecx movl 20(%r10),%ecx # qhasm: in1 = *(uint32 *) (k + 0) # asm 1: movl 0(in1=int64#5d # asm 2: movl 0(in1=%r8d movl 0(%r10),%r8d # qhasm: in6 = *(uint32 *) (iv + 0) # asm 1: movl 0(in6=int64#7d # asm 2: movl 0(in6=%eax movl 0(%rdx),%eax # qhasm: in11 = *(uint32 *) (k + 16) # asm 1: movl 16(in11=int64#9d # asm 2: movl 16(in11=%r11d movl 16(%r10),%r11d # qhasm: ((uint32 *)&x1)[0] = in12 # asm 1: movl x1=stack128#1 # asm 2: movl x1=0(%rsp) movl %ecx,0(%rsp) # qhasm: ((uint32 *)&x1)[1] = in1 # asm 1: movl in8=int64#4 # asm 2: mov $0,>in8=%rcx mov $0,%rcx # qhasm: in13 = *(uint32 *) (k + 24) # asm 1: movl 24(in13=int64#5d # asm 2: movl 24(in13=%r8d movl 24(%r10),%r8d # qhasm: in2 = *(uint32 *) (k + 4) # asm 1: movl 4(in2=int64#7d # asm 2: movl 4(in2=%eax movl 4(%r10),%eax # qhasm: in7 = *(uint32 *) (iv + 4) # asm 1: movl 4(in7=int64#3d # asm 2: movl 4(in7=%edx movl 4(%rdx),%edx # qhasm: ((uint32 *)&x2)[0] = in8 # asm 1: movl x2=stack128#2 # asm 2: movl x2=16(%rsp) movl %ecx,16(%rsp) # qhasm: ((uint32 *)&x2)[1] = in13 # asm 1: movl in4=int64#3d # asm 2: movl 12(in4=%edx movl 12(%r10),%edx # qhasm: in9 = 0 # asm 1: mov $0,>in9=int64#4 # asm 2: mov $0,>in9=%rcx mov $0,%rcx # qhasm: in14 = *(uint32 *) (k + 28) # asm 1: movl 28(in14=int64#5d # asm 2: movl 28(in14=%r8d movl 28(%r10),%r8d # qhasm: in3 = *(uint32 *) (k + 8) # asm 1: movl 8(in3=int64#7d # asm 2: movl 8(in3=%eax movl 8(%r10),%eax # qhasm: ((uint32 *)&x3)[0] = in4 # asm 1: movl x3=stack128#3 # asm 2: movl x3=32(%rsp) movl %edx,32(%rsp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl in0=int64#3 # asm 2: mov $1634760805,>in0=%rdx mov $1634760805,%rdx # qhasm: in5 = 857760878 # asm 1: mov $857760878,>in5=int64#4 # asm 2: mov $857760878,>in5=%rcx mov $857760878,%rcx # qhasm: in10 = 2036477234 # asm 1: mov $2036477234,>in10=int64#5 # asm 2: mov $2036477234,>in10=%r8 mov $2036477234,%r8 # qhasm: 
in15 = 1797285236 # asm 1: mov $1797285236,>in15=int64#7 # asm 2: mov $1797285236,>in15=%rax mov $1797285236,%rax # qhasm: ((uint32 *)&x0)[0] = in0 # asm 1: movl x0=stack128#4 # asm 2: movl x0=48(%rsp) movl %edx,48(%rsp) # qhasm: ((uint32 *)&x0)[1] = in5 # asm 1: movl z0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 48(%rsp),%xmm0 # qhasm: z5 = z0[1,1,1,1] # asm 1: pshufd $0x55,z5=int6464#2 # asm 2: pshufd $0x55,z5=%xmm1 pshufd $0x55,%xmm0,%xmm1 # qhasm: z10 = z0[2,2,2,2] # asm 1: pshufd $0xaa,z10=int6464#3 # asm 2: pshufd $0xaa,z10=%xmm2 pshufd $0xaa,%xmm0,%xmm2 # qhasm: z15 = z0[3,3,3,3] # asm 1: pshufd $0xff,z15=int6464#4 # asm 2: pshufd $0xff,z15=%xmm3 pshufd $0xff,%xmm0,%xmm3 # qhasm: z0 = z0[0,0,0,0] # asm 1: pshufd $0x00,z0=int6464#1 # asm 2: pshufd $0x00,z0=%xmm0 pshufd $0x00,%xmm0,%xmm0 # qhasm: orig5 = z5 # asm 1: movdqa orig5=stack128#5 # asm 2: movdqa orig5=64(%rsp) movdqa %xmm1,64(%rsp) # qhasm: orig10 = z10 # asm 1: movdqa orig10=stack128#6 # asm 2: movdqa orig10=80(%rsp) movdqa %xmm2,80(%rsp) # qhasm: orig15 = z15 # asm 1: movdqa orig15=stack128#7 # asm 2: movdqa orig15=96(%rsp) movdqa %xmm3,96(%rsp) # qhasm: orig0 = z0 # asm 1: movdqa orig0=stack128#8 # asm 2: movdqa orig0=112(%rsp) movdqa %xmm0,112(%rsp) # qhasm: z1 = x1 # asm 1: movdqa z1=int6464#1 # asm 2: movdqa z1=%xmm0 movdqa 0(%rsp),%xmm0 # qhasm: z6 = z1[2,2,2,2] # asm 1: pshufd $0xaa,z6=int6464#2 # asm 2: pshufd $0xaa,z6=%xmm1 pshufd $0xaa,%xmm0,%xmm1 # qhasm: z11 = z1[3,3,3,3] # asm 1: pshufd $0xff,z11=int6464#3 # asm 2: pshufd $0xff,z11=%xmm2 pshufd $0xff,%xmm0,%xmm2 # qhasm: z12 = z1[0,0,0,0] # asm 1: pshufd $0x00,z12=int6464#4 # asm 2: pshufd $0x00,z12=%xmm3 pshufd $0x00,%xmm0,%xmm3 # qhasm: z1 = z1[1,1,1,1] # asm 1: pshufd $0x55,z1=int6464#1 # asm 2: pshufd $0x55,z1=%xmm0 pshufd $0x55,%xmm0,%xmm0 # qhasm: orig6 = z6 # asm 1: movdqa orig6=stack128#9 # asm 2: movdqa orig6=128(%rsp) movdqa %xmm1,128(%rsp) # qhasm: orig11 = z11 # asm 1: movdqa orig11=stack128#10 # asm 2: movdqa orig11=144(%rsp) movdqa %xmm2,144(%rsp) # qhasm: orig12 = z12 # asm 1: movdqa orig12=stack128#11 # asm 2: movdqa orig12=160(%rsp) movdqa %xmm3,160(%rsp) # qhasm: orig1 = z1 # asm 1: movdqa orig1=stack128#12 # asm 2: movdqa orig1=176(%rsp) movdqa %xmm0,176(%rsp) # qhasm: z2 = x2 # asm 1: movdqa z2=int6464#1 # asm 2: movdqa z2=%xmm0 movdqa 16(%rsp),%xmm0 # qhasm: z7 = z2[3,3,3,3] # asm 1: pshufd $0xff,z7=int6464#2 # asm 2: pshufd $0xff,z7=%xmm1 pshufd $0xff,%xmm0,%xmm1 # qhasm: z13 = z2[1,1,1,1] # asm 1: pshufd $0x55,z13=int6464#3 # asm 2: pshufd $0x55,z13=%xmm2 pshufd $0x55,%xmm0,%xmm2 # qhasm: z2 = z2[2,2,2,2] # asm 1: pshufd $0xaa,z2=int6464#1 # asm 2: pshufd $0xaa,z2=%xmm0 pshufd $0xaa,%xmm0,%xmm0 # qhasm: orig7 = z7 # asm 1: movdqa orig7=stack128#13 # asm 2: movdqa orig7=192(%rsp) movdqa %xmm1,192(%rsp) # qhasm: orig13 = z13 # asm 1: movdqa orig13=stack128#14 # asm 2: movdqa orig13=208(%rsp) movdqa %xmm2,208(%rsp) # qhasm: orig2 = z2 # asm 1: movdqa orig2=stack128#15 # asm 2: movdqa orig2=224(%rsp) movdqa %xmm0,224(%rsp) # qhasm: z3 = x3 # asm 1: movdqa z3=int6464#1 # asm 2: movdqa z3=%xmm0 movdqa 32(%rsp),%xmm0 # qhasm: z4 = z3[0,0,0,0] # asm 1: pshufd $0x00,z4=int6464#2 # asm 2: pshufd $0x00,z4=%xmm1 pshufd $0x00,%xmm0,%xmm1 # qhasm: z14 = z3[2,2,2,2] # asm 1: pshufd $0xaa,z14=int6464#3 # asm 2: pshufd $0xaa,z14=%xmm2 pshufd $0xaa,%xmm0,%xmm2 # qhasm: z3 = z3[3,3,3,3] # asm 1: pshufd $0xff,z3=int6464#1 # asm 2: pshufd $0xff,z3=%xmm0 pshufd $0xff,%xmm0,%xmm0 # qhasm: orig4 = z4 # asm 1: movdqa orig4=stack128#16 # asm 2: movdqa 
orig4=240(%rsp) movdqa %xmm1,240(%rsp) # qhasm: orig14 = z14 # asm 1: movdqa orig14=stack128#17 # asm 2: movdqa orig14=256(%rsp) movdqa %xmm2,256(%rsp) # qhasm: orig3 = z3 # asm 1: movdqa orig3=stack128#18 # asm 2: movdqa orig3=272(%rsp) movdqa %xmm0,272(%rsp) # qhasm: bytesatleast256: ._bytesatleast256: # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int64#3d # asm 2: movl in8=%edx movl 16(%rsp),%edx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int64#4d # asm 2: movl 4+in9=%ecx movl 4+32(%rsp),%ecx # qhasm: ((uint32 *) &orig8)[0] = in8 # asm 1: movl orig8=stack128#19 # asm 2: movl orig8=288(%rsp) movl %edx,288(%rsp) # qhasm: ((uint32 *) &orig9)[0] = in9 # asm 1: movl orig9=stack128#20 # asm 2: movl orig9=304(%rsp) movl %ecx,304(%rsp) # qhasm: in8 += 1 # asm 1: add $1,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,x2=stack128#2 # asm 2: movl x2=16(%rsp) movl %edx,16(%rsp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl bytes_backup=stack64#8 # asm 2: movq bytes_backup=408(%rsp) movq %r9,408(%rsp) # qhasm: i = 12 # asm 1: mov $12,>i=int64#3 # asm 2: mov $12,>i=%rdx mov $12,%rdx # qhasm: z5 = orig5 # asm 1: movdqa z5=int6464#1 # asm 2: movdqa z5=%xmm0 movdqa 64(%rsp),%xmm0 # qhasm: z10 = orig10 # asm 1: movdqa z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 80(%rsp),%xmm1 # qhasm: z15 = orig15 # asm 1: movdqa z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 96(%rsp),%xmm2 # qhasm: z14 = orig14 # asm 1: movdqa z14=int6464#4 # asm 2: movdqa z14=%xmm3 movdqa 256(%rsp),%xmm3 # qhasm: z3 = orig3 # asm 1: movdqa z3=int6464#5 # asm 2: movdqa z3=%xmm4 movdqa 272(%rsp),%xmm4 # qhasm: z6 = orig6 # asm 1: movdqa z6=int6464#6 # asm 2: movdqa z6=%xmm5 movdqa 128(%rsp),%xmm5 # qhasm: z11 = orig11 # asm 1: movdqa z11=int6464#7 # asm 2: movdqa z11=%xmm6 movdqa 144(%rsp),%xmm6 # qhasm: z1 = orig1 # asm 1: movdqa z1=int6464#8 # asm 2: movdqa z1=%xmm7 movdqa 176(%rsp),%xmm7 # qhasm: z7 = orig7 # asm 1: movdqa z7=int6464#9 # asm 2: movdqa z7=%xmm8 movdqa 192(%rsp),%xmm8 # qhasm: z13 = orig13 # asm 1: movdqa z13=int6464#10 # asm 2: movdqa z13=%xmm9 movdqa 208(%rsp),%xmm9 # qhasm: z2 = orig2 # asm 1: movdqa z2=int6464#11 # asm 2: movdqa z2=%xmm10 movdqa 224(%rsp),%xmm10 # qhasm: z9 = orig9 # asm 1: movdqa z9=int6464#12 # asm 2: movdqa z9=%xmm11 movdqa 304(%rsp),%xmm11 # qhasm: z0 = orig0 # asm 1: movdqa z0=int6464#13 # asm 2: movdqa z0=%xmm12 movdqa 112(%rsp),%xmm12 # qhasm: z12 = orig12 # asm 1: movdqa z12=int6464#14 # asm 2: movdqa z12=%xmm13 movdqa 160(%rsp),%xmm13 # qhasm: z4 = orig4 # asm 1: movdqa z4=int6464#15 # asm 2: movdqa z4=%xmm14 movdqa 240(%rsp),%xmm14 # qhasm: z8 = orig8 # asm 1: movdqa z8=int6464#16 # asm 2: movdqa z8=%xmm15 movdqa 288(%rsp),%xmm15 # qhasm: mainloop1: ._mainloop1: # qhasm: z10_stack = z10 # asm 1: movdqa z10_stack=stack128#21 # asm 2: movdqa z10_stack=320(%rsp) movdqa %xmm1,320(%rsp) # qhasm: z15_stack = z15 # asm 1: movdqa z15_stack=stack128#22 # asm 2: movdqa z15_stack=336(%rsp) movdqa %xmm2,336(%rsp) # qhasm: y4 = z12 # asm 1: movdqa y4=int6464#2 # asm 2: movdqa y4=%xmm1 movdqa %xmm13,%xmm1 # qhasm: uint32323232 y4 += z0 # asm 1: paddd r4=int6464#3 # asm 2: movdqa r4=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y4 <<= 7 # asm 1: pslld $7,>= 25 
# asm 1: psrld $25,y9=int6464#2 # asm 2: movdqa y9=%xmm1 movdqa %xmm7,%xmm1 # qhasm: uint32323232 y9 += z5 # asm 1: paddd r9=int6464#3 # asm 2: movdqa r9=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y9 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y8=int6464#2 # asm 2: movdqa y8=%xmm1 movdqa %xmm12,%xmm1 # qhasm: uint32323232 y8 += z4 # asm 1: paddd r8=int6464#3 # asm 2: movdqa r8=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y8 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y13=int6464#2 # asm 2: movdqa y13=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 y13 += z9 # asm 1: paddd r13=int6464#3 # asm 2: movdqa r13=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y13 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y12=int6464#2 # asm 2: movdqa y12=%xmm1 movdqa %xmm14,%xmm1 # qhasm: uint32323232 y12 += z8 # asm 1: paddd r12=int6464#3 # asm 2: movdqa r12=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y12 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y1=int6464#2 # asm 2: movdqa y1=%xmm1 movdqa %xmm11,%xmm1 # qhasm: uint32323232 y1 += z13 # asm 1: paddd r1=int6464#3 # asm 2: movdqa r1=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y1 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y0=int6464#2 # asm 2: movdqa y0=%xmm1 movdqa %xmm15,%xmm1 # qhasm: uint32323232 y0 += z12 # asm 1: paddd r0=int6464#3 # asm 2: movdqa r0=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y0 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 320(%rsp),%xmm1 # qhasm: z0_stack = z0 # asm 1: movdqa z0_stack=stack128#21 # asm 2: movdqa z0_stack=320(%rsp) movdqa %xmm12,320(%rsp) # qhasm: y5 = z13 # asm 1: movdqa y5=int6464#3 # asm 2: movdqa y5=%xmm2 movdqa %xmm9,%xmm2 # qhasm: uint32323232 y5 += z1 # asm 1: paddd r5=int6464#13 # asm 2: movdqa r5=%xmm12 movdqa %xmm2,%xmm12 # qhasm: uint32323232 y5 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,y14=int6464#3 # asm 2: movdqa y14=%xmm2 movdqa %xmm5,%xmm2 # qhasm: uint32323232 y14 += z10 # asm 1: paddd r14=int6464#13 # asm 2: movdqa r14=%xmm12 movdqa %xmm2,%xmm12 # qhasm: uint32323232 y14 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 336(%rsp),%xmm2 # qhasm: z5_stack = z5 # asm 1: movdqa z5_stack=stack128#22 # asm 2: movdqa z5_stack=336(%rsp) movdqa %xmm0,336(%rsp) # qhasm: y3 = z11 # asm 1: movdqa y3=int6464#1 # asm 2: movdqa y3=%xmm0 movdqa %xmm6,%xmm0 # qhasm: uint32323232 y3 += z15 # asm 1: paddd r3=int6464#13 # asm 2: movdqa r3=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y3 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y2=int6464#1 # asm 2: movdqa y2=%xmm0 movdqa %xmm1,%xmm0 # qhasm: uint32323232 y2 += z14 # asm 1: paddd r2=int6464#13 # asm 2: movdqa r2=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y2 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y7=int6464#1 # asm 2: movdqa y7=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 y7 += z3 # asm 1: paddd r7=int6464#13 # asm 2: movdqa r7=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y7 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y6=int6464#1 # asm 2: movdqa y6=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 y6 += z2 # asm 1: paddd r6=int6464#13 # asm 2: movdqa r6=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y11=int6464#1 # asm 2: movdqa y11=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 y11 += z7 # asm 1: paddd r11=int6464#13 # asm 2: movdqa r11=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y11 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld 
$19,y10=int6464#1 # asm 2: movdqa y10=%xmm0 movdqa %xmm10,%xmm0 # qhasm: uint32323232 y10 += z6 # asm 1: paddd r10=int6464#13 # asm 2: movdqa r10=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y10 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 320(%rsp),%xmm0 # qhasm: z10_stack = z10 # asm 1: movdqa z10_stack=stack128#21 # asm 2: movdqa z10_stack=320(%rsp) movdqa %xmm1,320(%rsp) # qhasm: y1 = z3 # asm 1: movdqa y1=int6464#2 # asm 2: movdqa y1=%xmm1 movdqa %xmm4,%xmm1 # qhasm: uint32323232 y1 += z0 # asm 1: paddd r1=int6464#13 # asm 2: movdqa r1=%xmm12 movdqa %xmm1,%xmm12 # qhasm: uint32323232 y1 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y15=int6464#2 # asm 2: movdqa y15=%xmm1 movdqa %xmm8,%xmm1 # qhasm: uint32323232 y15 += z11 # asm 1: paddd r15=int6464#13 # asm 2: movdqa r15=%xmm12 movdqa %xmm1,%xmm12 # qhasm: uint32323232 y15 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z5=int6464#13 # asm 2: movdqa z5=%xmm12 movdqa 336(%rsp),%xmm12 # qhasm: z15_stack = z15 # asm 1: movdqa z15_stack=stack128#22 # asm 2: movdqa z15_stack=336(%rsp) movdqa %xmm2,336(%rsp) # qhasm: y6 = z4 # asm 1: movdqa y6=int6464#2 # asm 2: movdqa y6=%xmm1 movdqa %xmm14,%xmm1 # qhasm: uint32323232 y6 += z5 # asm 1: paddd r6=int6464#3 # asm 2: movdqa r6=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y6 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y2=int6464#2 # asm 2: movdqa y2=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 y2 += z1 # asm 1: paddd r2=int6464#3 # asm 2: movdqa r2=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y2 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y7=int6464#2 # asm 2: movdqa y7=%xmm1 movdqa %xmm12,%xmm1 # qhasm: uint32323232 y7 += z6 # asm 1: paddd r7=int6464#3 # asm 2: movdqa r7=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y7 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y3=int6464#2 # asm 2: movdqa y3=%xmm1 movdqa %xmm7,%xmm1 # qhasm: uint32323232 y3 += z2 # asm 1: paddd r3=int6464#3 # asm 2: movdqa r3=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y3 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y4=int6464#2 # asm 2: movdqa y4=%xmm1 movdqa %xmm5,%xmm1 # qhasm: uint32323232 y4 += z7 # asm 1: paddd r4=int6464#3 # asm 2: movdqa r4=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y4 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y0=int6464#2 # asm 2: movdqa y0=%xmm1 movdqa %xmm10,%xmm1 # qhasm: uint32323232 y0 += z3 # asm 1: paddd r0=int6464#3 # asm 2: movdqa r0=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y0 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 320(%rsp),%xmm1 # qhasm: z0_stack = z0 # asm 1: movdqa z0_stack=stack128#21 # asm 2: movdqa z0_stack=320(%rsp) movdqa %xmm0,320(%rsp) # qhasm: y5 = z7 # asm 1: movdqa y5=int6464#1 # asm 2: movdqa y5=%xmm0 movdqa %xmm8,%xmm0 # qhasm: uint32323232 y5 += z4 # asm 1: paddd r5=int6464#3 # asm 2: movdqa r5=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 y5 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,y11=int6464#1 # asm 2: movdqa y11=%xmm0 movdqa %xmm11,%xmm0 # qhasm: uint32323232 y11 += z10 # asm 1: paddd r11=int6464#3 # asm 2: movdqa r11=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 y11 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 336(%rsp),%xmm2 # qhasm: z5_stack = z5 # asm 1: movdqa z5_stack=stack128#22 # asm 2: movdqa z5_stack=336(%rsp) movdqa %xmm12,336(%rsp) # qhasm: y12 = z14 # asm 1: movdqa y12=int6464#1 # asm 2: movdqa y12=%xmm0 movdqa %xmm3,%xmm0 # 
qhasm: uint32323232 y12 += z15 # asm 1: paddd r12=int6464#13 # asm 2: movdqa r12=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y12 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y8=int6464#1 # asm 2: movdqa y8=%xmm0 movdqa %xmm1,%xmm0 # qhasm: uint32323232 y8 += z11 # asm 1: paddd r8=int6464#13 # asm 2: movdqa r8=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y8 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y13=int6464#1 # asm 2: movdqa y13=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 y13 += z12 # asm 1: paddd r13=int6464#13 # asm 2: movdqa r13=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y13 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y9=int6464#1 # asm 2: movdqa y9=%xmm0 movdqa %xmm6,%xmm0 # qhasm: uint32323232 y9 += z8 # asm 1: paddd r9=int6464#13 # asm 2: movdqa r9=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y9 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y14=int6464#1 # asm 2: movdqa y14=%xmm0 movdqa %xmm13,%xmm0 # qhasm: uint32323232 y14 += z13 # asm 1: paddd r14=int6464#13 # asm 2: movdqa r14=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y14 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y10=int6464#1 # asm 2: movdqa y10=%xmm0 movdqa %xmm15,%xmm0 # qhasm: uint32323232 y10 += z9 # asm 1: paddd r10=int6464#13 # asm 2: movdqa r10=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y10 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,y15=int6464#1 # asm 2: movdqa y15=%xmm0 movdqa %xmm9,%xmm0 # qhasm: uint32323232 y15 += z14 # asm 1: paddd r15=int6464#13 # asm 2: movdqa r15=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y15 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z0=int6464#13 # asm 2: movdqa z0=%xmm12 movdqa 320(%rsp),%xmm12 # qhasm: z5 = z5_stack # asm 1: movdqa z5=int6464#1 # asm 2: movdqa z5=%xmm0 movdqa 336(%rsp),%xmm0 # qhasm: unsigned>? 
i -= 2 # asm 1: sub $2, ja ._mainloop1 # qhasm: uint32323232 z0 += orig0 # asm 1: paddd in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: (uint32) in0 ^= *(uint32 *) (m + 192) # asm 1: xorl 192(in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: (uint32) in4 ^= *(uint32 *) (m + 208) # asm 1: xorl 208(in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: z8 <<<= 96 # asm 1: pshufd 
$0x39,in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: (uint32) in8 ^= *(uint32 *) (m + 224) # asm 1: xorl 224(in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: (uint32) in12 ^= *(uint32 *) (m + 240) # asm 1: xorl 240(bytes=int64#6 # asm 2: movq bytes=%r9 movq 408(%rsp),%r9 # qhasm: bytes -= 256 # asm 1: sub $256,? 
bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: bytesbetween1and255: ._bytesbetween1and255: # qhasm: unsignedctarget=int64#3 # asm 2: mov ctarget=%rdx mov %rdi,%rdx # qhasm: out = &tmp # asm 1: leaq out=int64#1 # asm 2: leaq out=%rdi leaq 416(%rsp),%rdi # qhasm: i = bytes # asm 1: mov i=int64#4 # asm 2: mov i=%rcx mov %r9,%rcx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # qhasm: out = &tmp # asm 1: leaq out=int64#1 # asm 2: leaq out=%rdi leaq 416(%rsp),%rdi # qhasm: m = &tmp # asm 1: leaq m=int64#2 # asm 2: leaq m=%rsi leaq 416(%rsp),%rsi # comment:fp stack unchanged by fallthrough # qhasm: nocopy: ._nocopy: # qhasm: bytes_backup = bytes # asm 1: movq bytes_backup=stack64#8 # asm 2: movq bytes_backup=408(%rsp) movq %r9,408(%rsp) # qhasm: diag0 = x0 # asm 1: movdqa diag0=int6464#1 # asm 2: movdqa diag0=%xmm0 movdqa 48(%rsp),%xmm0 # qhasm: diag1 = x1 # asm 1: movdqa diag1=int6464#2 # asm 2: movdqa diag1=%xmm1 movdqa 0(%rsp),%xmm1 # qhasm: diag2 = x2 # asm 1: movdqa diag2=int6464#3 # asm 2: movdqa diag2=%xmm2 movdqa 16(%rsp),%xmm2 # qhasm: diag3 = x3 # asm 1: movdqa diag3=int6464#4 # asm 2: movdqa diag3=%xmm3 movdqa 32(%rsp),%xmm3 # qhasm: a0 = diag1 # asm 1: movdqa a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: i = 12 # asm 1: mov $12,>i=int64#4 # asm 2: mov $12,>i=%rcx mov $12,%rcx # qhasm: mainloop2: ._mainloop2: # qhasm: uint32323232 a0 += diag0 # asm 1: paddd a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 # asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 
# asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,? i -= 4 # asm 1: sub $4,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,b0=int6464#8,>b0=int6464#8 # asm 2: pxor >b0=%xmm7,>b0=%xmm7 pxor %xmm7,%xmm7 # qhasm: uint32323232 b7 >>= 14 # asm 1: psrld $14, ja ._mainloop2 # qhasm: uint32323232 diag0 += x0 # asm 1: paddd in0=int64#4 # asm 2: movd in0=%rcx movd %xmm0,%rcx # qhasm: in12 = diag1 # asm 1: movd in12=int64#5 # asm 2: movd in12=%r8 movd %xmm1,%r8 # qhasm: in8 = diag2 # asm 1: movd in8=int64#6 # asm 2: movd in8=%r9 movd %xmm2,%r9 # qhasm: in4 = diag3 # asm 1: movd in4=int64#7 # asm 2: movd in4=%rax movd %xmm3,%rax # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in1 = diag1 # asm 1: movd in1=int64#5 # asm 2: movd in1=%r8 movd %xmm1,%r8 # qhasm: in13 = diag2 # asm 1: movd in13=int64#6 # asm 2: movd in13=%r9 movd %xmm2,%r9 # qhasm: in9 = diag3 # asm 1: movd in9=int64#7 # asm 2: movd in9=%rax movd %xmm3,%rax # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in10=int64#4 # asm 2: movd in10=%rcx movd %xmm0,%rcx # qhasm: in6 = diag1 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm1,%r8 # qhasm: in2 = diag2 # asm 1: movd in2=int64#6 # asm 2: movd in2=%r9 movd %xmm2,%r9 # qhasm: in14 = diag3 # asm 1: movd in14=int64#7 # asm 2: movd in14=%rax movd %xmm3,%rax # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in15=int64#4 # asm 2: movd in15=%rcx movd %xmm0,%rcx # qhasm: in11 = diag1 # asm 1: movd in11=int64#5 # asm 2: movd in11=%r8 movd %xmm1,%r8 # qhasm: in7 = diag2 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm2,%r9 # qhasm: in3 = diag3 # asm 1: movd in3=int64#7 # asm 2: movd in3=%rax movd %xmm3,%rax # qhasm: (uint32) in15 ^= *(uint32 *) (m + 60) # asm 1: xorl 60(bytes=int64#6 # asm 2: movq bytes=%r9 movq 408(%rsp),%r9 # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int64#4d # asm 2: movl in8=%ecx movl 16(%rsp),%ecx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int64#5d # asm 2: movl 4+in9=%r8d movl 4+32(%rsp),%r8d # qhasm: in8 += 1 # asm 1: add $1,in9=int64#5 # asm 2: mov in9=%r8 mov %rcx,%r8 # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,x2=stack128#2 # asm 2: movl x2=16(%rsp) movl %ecx,16(%rsp) # qhasm: ((uint32 
*)&x3)[1] = in9 # asm 1: movl ? unsigned ja ._bytesatleast65 # comment:fp stack unchanged by jump # qhasm: goto bytesatleast64 if !unsigned< jae ._bytesatleast64 # qhasm: m = out # asm 1: mov m=int64#2 # asm 2: mov m=%rsi mov %rdi,%rsi # qhasm: out = ctarget # asm 1: mov out=int64#1 # asm 2: mov out=%rdi mov %rdx,%rdi # qhasm: i = bytes # asm 1: mov i=int64#4 # asm 2: mov i=%rcx mov %r9,%rcx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # comment:fp stack unchanged by fallthrough # qhasm: bytesatleast64: ._bytesatleast64: # comment:fp stack unchanged by fallthrough # qhasm: done: ._done: # qhasm: r11_caller = r11_stack # asm 1: movq r11_caller=int64#9 # asm 2: movq r11_caller=%r11 movq 352(%rsp),%r11 # qhasm: r12_caller = r12_stack # asm 1: movq r12_caller=int64#10 # asm 2: movq r12_caller=%r12 movq 360(%rsp),%r12 # qhasm: r13_caller = r13_stack # asm 1: movq r13_caller=int64#11 # asm 2: movq r13_caller=%r13 movq 368(%rsp),%r13 # qhasm: r14_caller = r14_stack # asm 1: movq r14_caller=int64#12 # asm 2: movq r14_caller=%r14 movq 376(%rsp),%r14 # qhasm: r15_caller = r15_stack # asm 1: movq r15_caller=int64#13 # asm 2: movq r15_caller=%r15 movq 384(%rsp),%r15 # qhasm: rbx_caller = rbx_stack # asm 1: movq rbx_caller=int64#14 # asm 2: movq rbx_caller=%rbx movq 392(%rsp),%rbx # qhasm: rbp_caller = rbp_stack # asm 1: movq rbp_caller=int64#15 # asm 2: movq rbp_caller=%rbp movq 400(%rsp),%rbp # qhasm: leave add %r11,%rsp xor %rax,%rax xor %rdx,%rdx ret # qhasm: bytesatleast65: ._bytesatleast65: # qhasm: bytes -= 64 # asm 1: sub $64,= 64) { crypto_core_salsa2012(c,in,k,sigma); u = 1; for (i = 8;i < 16;++i) { u += (unsigned int) in[i]; in[i] = u; u >>= 8; } clen -= 64; c += 64; } if (clen) { crypto_core_salsa2012(block,in,k,sigma); for (i = 0;i < clen;++i) c[i] = block[i]; } return 0; } curvedns-curvedns-0.87/nacl/crypto_stream/salsa2012/ref/xor.c000066400000000000000000000016651150631715100241100ustar00rootroot00000000000000/* version 20080913 D. J. Bernstein Public domain. 
*/

#include "crypto_core_salsa2012.h"
#include "crypto_stream.h"

typedef unsigned int uint32;

static const unsigned char sigma[16] = "expand 32-byte k";

int crypto_stream_xor(
        unsigned char *c,
  const unsigned char *m,unsigned long long mlen,
  const unsigned char *n,
  const unsigned char *k
)
{
  unsigned char in[16];
  unsigned char block[64];
  int i;
  unsigned int u;

  if (!mlen) return 0;

  for (i = 0;i < 8;++i) in[i] = n[i];
  for (i = 8;i < 16;++i) in[i] = 0;

  while (mlen >= 64) {
    crypto_core_salsa2012(block,in,k,sigma);
    for (i = 0;i < 64;++i) c[i] = m[i] ^ block[i];
    u = 1;
    for (i = 8;i < 16;++i) {
      u += (unsigned int) in[i];
      in[i] = u;
      u >>= 8;
    }
    mlen -= 64;
    c += 64;
    m += 64;
  }

  if (mlen) {
    crypto_core_salsa2012(block,in,k,sigma);
    for (i = 0;i < mlen;++i) c[i] = m[i] ^ block[i];
  }

  return 0;
}
curvedns-curvedns-0.87/nacl/crypto_stream/salsa2012/used000066400000000000000000000000001150631715100232210ustar00rootroot00000000000000
curvedns-curvedns-0.87/nacl/crypto_stream/salsa2012/x86_xmm5/000077500000000000000000000000001150631715100237435ustar00rootroot00000000000000
curvedns-curvedns-0.87/nacl/crypto_stream/salsa2012/x86_xmm5/api.h000066400000000000000000000000671150631715100246700ustar00rootroot00000000000000
#define CRYPTO_KEYBYTES 32
#define CRYPTO_NONCEBYTES 8
curvedns-curvedns-0.87/nacl/crypto_stream/salsa2012/x86_xmm5/stream.s000066400000000000000000004266041150631715100254360ustar00rootroot00000000000000
# qhasm: int32 a
# qhasm: stack32 arg1
# qhasm: stack32 arg2
# qhasm: stack32 arg3
# qhasm: stack32 arg4
# qhasm: stack32 arg5
# qhasm: stack32 arg6
# qhasm: input arg1
# qhasm: input arg2
# qhasm: input arg3
# qhasm: input arg4
# qhasm: input arg5
# qhasm: input arg6
# qhasm: int32 eax
# qhasm: int32 ebx
# qhasm: int32 esi
# qhasm: int32 edi
# qhasm: int32 ebp
# qhasm: caller eax
# qhasm: caller ebx
# qhasm: caller esi
# qhasm: caller edi
# qhasm: caller ebp
# qhasm: int32 k
# qhasm: int32 kbits
# qhasm: int32 iv
# qhasm: int32 i
# qhasm: stack128 x0
# qhasm: stack128 x1
# qhasm: stack128 x2
# qhasm: stack128 x3
# qhasm: int32 m
# qhasm: stack32 out_stack
# qhasm: int32 out
# qhasm: stack32 bytes_stack
# qhasm: int32 bytes
# qhasm: stack32 eax_stack
# qhasm: stack32 ebx_stack
# qhasm: stack32 esi_stack
# qhasm: stack32 edi_stack
# qhasm: stack32 ebp_stack
# qhasm: int6464 diag0
# qhasm: int6464 diag1
# qhasm: int6464 diag2
# qhasm: int6464 diag3
# qhasm: int6464 a0
# qhasm: int6464 a1
# qhasm: int6464 a2
# qhasm: int6464 a3
# qhasm: int6464 a4
# qhasm: int6464 a5
# qhasm: int6464 a6
# qhasm: int6464 a7
# qhasm: int6464 b0
# qhasm: int6464 b1
# qhasm: int6464 b2
# qhasm: int6464 b3
# qhasm: int6464 b4
# qhasm: int6464 b5
# qhasm: int6464 b6
# qhasm: int6464 b7
# qhasm: int6464 z0
# qhasm: int6464 z1
# qhasm: int6464 z2
# qhasm: int6464 z3
# qhasm: int6464 z4
# qhasm: int6464 z5
# qhasm: int6464 z6
# qhasm: int6464 z7
# qhasm: int6464 z8
# qhasm: int6464 z9
# qhasm: int6464 z10
# qhasm: int6464 z11
# qhasm: int6464 z12
# qhasm: int6464 z13
# qhasm: int6464 z14
# qhasm: int6464 z15
# qhasm: stack128 z0_stack
# qhasm: stack128 z1_stack
# qhasm: stack128 z2_stack
# qhasm: stack128 z3_stack
# qhasm: stack128 z4_stack
# qhasm: stack128 z5_stack
# qhasm: stack128 z6_stack
# qhasm: stack128 z7_stack
# qhasm: stack128 z8_stack
# qhasm: stack128 z9_stack
# qhasm: stack128 z10_stack
# qhasm: stack128 z11_stack
# qhasm: stack128 z12_stack
# qhasm: stack128 z13_stack
# qhasm: stack128 z14_stack
# qhasm: stack128 z15_stack
# qhasm: stack128 orig0
# qhasm: stack128 orig1
# qhasm: stack128 orig2
# qhasm: stack128
orig3 # qhasm: stack128 orig4 # qhasm: stack128 orig5 # qhasm: stack128 orig6 # qhasm: stack128 orig7 # qhasm: stack128 orig8 # qhasm: stack128 orig9 # qhasm: stack128 orig10 # qhasm: stack128 orig11 # qhasm: stack128 orig12 # qhasm: stack128 orig13 # qhasm: stack128 orig14 # qhasm: stack128 orig15 # qhasm: int6464 p # qhasm: int6464 q # qhasm: int6464 r # qhasm: int6464 s # qhasm: int6464 t # qhasm: int6464 u # qhasm: int6464 v # qhasm: int6464 w # qhasm: int6464 mp # qhasm: int6464 mq # qhasm: int6464 mr # qhasm: int6464 ms # qhasm: int6464 mt # qhasm: int6464 mu # qhasm: int6464 mv # qhasm: int6464 mw # qhasm: int32 in0 # qhasm: int32 in1 # qhasm: int32 in2 # qhasm: int32 in3 # qhasm: int32 in4 # qhasm: int32 in5 # qhasm: int32 in6 # qhasm: int32 in7 # qhasm: int32 in8 # qhasm: int32 in9 # qhasm: int32 in10 # qhasm: int32 in11 # qhasm: int32 in12 # qhasm: int32 in13 # qhasm: int32 in14 # qhasm: int32 in15 # qhasm: stack512 tmp # qhasm: stack32 ctarget # qhasm: enter crypto_stream_salsa2012_x86_xmm5 .text .p2align 5 .globl _crypto_stream_salsa2012_x86_xmm5 .globl crypto_stream_salsa2012_x86_xmm5 _crypto_stream_salsa2012_x86_xmm5: crypto_stream_salsa2012_x86_xmm5: mov %esp,%eax and $31,%eax add $704,%eax sub %eax,%esp # qhasm: eax_stack = eax # asm 1: movl eax_stack=stack32#1 # asm 2: movl eax_stack=0(%esp) movl %eax,0(%esp) # qhasm: ebx_stack = ebx # asm 1: movl ebx_stack=stack32#2 # asm 2: movl ebx_stack=4(%esp) movl %ebx,4(%esp) # qhasm: esi_stack = esi # asm 1: movl esi_stack=stack32#3 # asm 2: movl esi_stack=8(%esp) movl %esi,8(%esp) # qhasm: edi_stack = edi # asm 1: movl edi_stack=stack32#4 # asm 2: movl edi_stack=12(%esp) movl %edi,12(%esp) # qhasm: ebp_stack = ebp # asm 1: movl ebp_stack=stack32#5 # asm 2: movl ebp_stack=16(%esp) movl %ebp,16(%esp) # qhasm: bytes = arg2 # asm 1: movl bytes=int32#3 # asm 2: movl bytes=%edx movl 8(%esp,%eax),%edx # qhasm: out = arg1 # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 4(%esp,%eax),%edi # qhasm: m = out # asm 1: mov m=int32#5 # asm 2: mov m=%esi mov %edi,%esi # qhasm: iv = arg4 # asm 1: movl iv=int32#4 # asm 2: movl iv=%ebx movl 16(%esp,%eax),%ebx # qhasm: k = arg5 # asm 1: movl k=int32#7 # asm 2: movl k=%ebp movl 20(%esp,%eax),%ebp # qhasm: unsigned>? bytes - 0 # asm 1: cmp $0, jbe ._done # qhasm: a = 0 # asm 1: mov $0,>a=int32#1 # asm 2: mov $0,>a=%eax mov $0,%eax # qhasm: i = bytes # asm 1: mov i=int32#2 # asm 2: mov i=%ecx mov %edx,%ecx # qhasm: while (i) { *out++ = a; --i } rep stosb # qhasm: out -= bytes # asm 1: subl eax_stack=stack32#1 # asm 2: movl eax_stack=0(%esp) movl %eax,0(%esp) # qhasm: ebx_stack = ebx # asm 1: movl ebx_stack=stack32#2 # asm 2: movl ebx_stack=4(%esp) movl %ebx,4(%esp) # qhasm: esi_stack = esi # asm 1: movl esi_stack=stack32#3 # asm 2: movl esi_stack=8(%esp) movl %esi,8(%esp) # qhasm: edi_stack = edi # asm 1: movl edi_stack=stack32#4 # asm 2: movl edi_stack=12(%esp) movl %edi,12(%esp) # qhasm: ebp_stack = ebp # asm 1: movl ebp_stack=stack32#5 # asm 2: movl ebp_stack=16(%esp) movl %ebp,16(%esp) # qhasm: out = arg1 # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 4(%esp,%eax),%edi # qhasm: m = arg2 # asm 1: movl m=int32#5 # asm 2: movl m=%esi movl 8(%esp,%eax),%esi # qhasm: bytes = arg3 # asm 1: movl bytes=int32#3 # asm 2: movl bytes=%edx movl 12(%esp,%eax),%edx # qhasm: iv = arg5 # asm 1: movl iv=int32#4 # asm 2: movl iv=%ebx movl 20(%esp,%eax),%ebx # qhasm: k = arg6 # asm 1: movl k=int32#7 # asm 2: movl k=%ebp movl 24(%esp,%eax),%ebp # qhasm: unsigned>? 
bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: start: ._start: # qhasm: out_stack = out # asm 1: movl out_stack=stack32#6 # asm 2: movl out_stack=20(%esp) movl %edi,20(%esp) # qhasm: bytes_stack = bytes # asm 1: movl bytes_stack=stack32#7 # asm 2: movl bytes_stack=24(%esp) movl %edx,24(%esp) # qhasm: in4 = *(uint32 *) (k + 12) # asm 1: movl 12(in4=int32#1 # asm 2: movl 12(in4=%eax movl 12(%ebp),%eax # qhasm: in12 = *(uint32 *) (k + 20) # asm 1: movl 20(in12=int32#2 # asm 2: movl 20(in12=%ecx movl 20(%ebp),%ecx # qhasm: ((uint32 *)&x3)[0] = in4 # asm 1: movl x3=stack128#1 # asm 2: movl x3=32(%esp) movl %eax,32(%esp) # qhasm: ((uint32 *)&x1)[0] = in12 # asm 1: movl x1=stack128#2 # asm 2: movl x1=48(%esp) movl %ecx,48(%esp) # qhasm: in0 = 1634760805 # asm 1: mov $1634760805,>in0=int32#1 # asm 2: mov $1634760805,>in0=%eax mov $1634760805,%eax # qhasm: in8 = 0 # asm 1: mov $0,>in8=int32#2 # asm 2: mov $0,>in8=%ecx mov $0,%ecx # qhasm: ((uint32 *)&x0)[0] = in0 # asm 1: movl x0=stack128#3 # asm 2: movl x0=64(%esp) movl %eax,64(%esp) # qhasm: ((uint32 *)&x2)[0] = in8 # asm 1: movl x2=stack128#4 # asm 2: movl x2=80(%esp) movl %ecx,80(%esp) # qhasm: in6 = *(uint32 *) (iv + 0) # asm 1: movl 0(in6=int32#1 # asm 2: movl 0(in6=%eax movl 0(%ebx),%eax # qhasm: in7 = *(uint32 *) (iv + 4) # asm 1: movl 4(in7=int32#2 # asm 2: movl 4(in7=%ecx movl 4(%ebx),%ecx # qhasm: ((uint32 *)&x1)[2] = in6 # asm 1: movl in9=int32#1 # asm 2: mov $0,>in9=%eax mov $0,%eax # qhasm: in10 = 2036477234 # asm 1: mov $2036477234,>in10=int32#2 # asm 2: mov $2036477234,>in10=%ecx mov $2036477234,%ecx # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl in1=int32#1 # asm 2: movl 0(in1=%eax movl 0(%ebp),%eax # qhasm: in2 = *(uint32 *) (k + 4) # asm 1: movl 4(in2=int32#2 # asm 2: movl 4(in2=%ecx movl 4(%ebp),%ecx # qhasm: in3 = *(uint32 *) (k + 8) # asm 1: movl 8(in3=int32#3 # asm 2: movl 8(in3=%edx movl 8(%ebp),%edx # qhasm: in5 = 857760878 # asm 1: mov $857760878,>in5=int32#4 # asm 2: mov $857760878,>in5=%ebx mov $857760878,%ebx # qhasm: ((uint32 *)&x1)[1] = in1 # asm 1: movl in11=int32#1 # asm 2: movl 16(in11=%eax movl 16(%ebp),%eax # qhasm: in13 = *(uint32 *) (k + 24) # asm 1: movl 24(in13=int32#2 # asm 2: movl 24(in13=%ecx movl 24(%ebp),%ecx # qhasm: in14 = *(uint32 *) (k + 28) # asm 1: movl 28(in14=int32#3 # asm 2: movl 28(in14=%edx movl 28(%ebp),%edx # qhasm: in15 = 1797285236 # asm 1: mov $1797285236,>in15=int32#4 # asm 2: mov $1797285236,>in15=%ebx mov $1797285236,%ebx # qhasm: ((uint32 *)&x1)[3] = in11 # asm 1: movl bytes=int32#1 # asm 2: movl bytes=%eax movl 24(%esp),%eax # qhasm: unsignedz0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 64(%esp),%xmm0 # qhasm: z5 = z0[1,1,1,1] # asm 1: pshufd $0x55,z5=int6464#2 # asm 2: pshufd $0x55,z5=%xmm1 pshufd $0x55,%xmm0,%xmm1 # qhasm: z10 = z0[2,2,2,2] # asm 1: pshufd $0xaa,z10=int6464#3 # asm 2: pshufd $0xaa,z10=%xmm2 pshufd $0xaa,%xmm0,%xmm2 # qhasm: z15 = z0[3,3,3,3] # asm 1: pshufd $0xff,z15=int6464#4 # asm 2: pshufd $0xff,z15=%xmm3 pshufd $0xff,%xmm0,%xmm3 # qhasm: z0 = z0[0,0,0,0] # asm 1: pshufd $0x00,z0=int6464#1 # asm 2: pshufd $0x00,z0=%xmm0 pshufd $0x00,%xmm0,%xmm0 # qhasm: orig5 = z5 # asm 1: movdqa orig5=stack128#5 # asm 2: movdqa orig5=96(%esp) movdqa %xmm1,96(%esp) # qhasm: orig10 = z10 # asm 1: movdqa orig10=stack128#6 # asm 2: movdqa orig10=112(%esp) movdqa %xmm2,112(%esp) # qhasm: orig15 = z15 # asm 1: movdqa orig15=stack128#7 # asm 2: movdqa orig15=128(%esp) movdqa %xmm3,128(%esp) # qhasm: orig0 = z0 # asm 1: movdqa 
orig0=stack128#8 # asm 2: movdqa orig0=144(%esp) movdqa %xmm0,144(%esp) # qhasm: z1 = x1 # asm 1: movdqa z1=int6464#1 # asm 2: movdqa z1=%xmm0 movdqa 48(%esp),%xmm0 # qhasm: z6 = z1[2,2,2,2] # asm 1: pshufd $0xaa,z6=int6464#2 # asm 2: pshufd $0xaa,z6=%xmm1 pshufd $0xaa,%xmm0,%xmm1 # qhasm: z11 = z1[3,3,3,3] # asm 1: pshufd $0xff,z11=int6464#3 # asm 2: pshufd $0xff,z11=%xmm2 pshufd $0xff,%xmm0,%xmm2 # qhasm: z12 = z1[0,0,0,0] # asm 1: pshufd $0x00,z12=int6464#4 # asm 2: pshufd $0x00,z12=%xmm3 pshufd $0x00,%xmm0,%xmm3 # qhasm: z1 = z1[1,1,1,1] # asm 1: pshufd $0x55,z1=int6464#1 # asm 2: pshufd $0x55,z1=%xmm0 pshufd $0x55,%xmm0,%xmm0 # qhasm: orig6 = z6 # asm 1: movdqa orig6=stack128#9 # asm 2: movdqa orig6=160(%esp) movdqa %xmm1,160(%esp) # qhasm: orig11 = z11 # asm 1: movdqa orig11=stack128#10 # asm 2: movdqa orig11=176(%esp) movdqa %xmm2,176(%esp) # qhasm: orig12 = z12 # asm 1: movdqa orig12=stack128#11 # asm 2: movdqa orig12=192(%esp) movdqa %xmm3,192(%esp) # qhasm: orig1 = z1 # asm 1: movdqa orig1=stack128#12 # asm 2: movdqa orig1=208(%esp) movdqa %xmm0,208(%esp) # qhasm: z2 = x2 # asm 1: movdqa z2=int6464#1 # asm 2: movdqa z2=%xmm0 movdqa 80(%esp),%xmm0 # qhasm: z7 = z2[3,3,3,3] # asm 1: pshufd $0xff,z7=int6464#2 # asm 2: pshufd $0xff,z7=%xmm1 pshufd $0xff,%xmm0,%xmm1 # qhasm: z13 = z2[1,1,1,1] # asm 1: pshufd $0x55,z13=int6464#3 # asm 2: pshufd $0x55,z13=%xmm2 pshufd $0x55,%xmm0,%xmm2 # qhasm: z2 = z2[2,2,2,2] # asm 1: pshufd $0xaa,z2=int6464#1 # asm 2: pshufd $0xaa,z2=%xmm0 pshufd $0xaa,%xmm0,%xmm0 # qhasm: orig7 = z7 # asm 1: movdqa orig7=stack128#13 # asm 2: movdqa orig7=224(%esp) movdqa %xmm1,224(%esp) # qhasm: orig13 = z13 # asm 1: movdqa orig13=stack128#14 # asm 2: movdqa orig13=240(%esp) movdqa %xmm2,240(%esp) # qhasm: orig2 = z2 # asm 1: movdqa orig2=stack128#15 # asm 2: movdqa orig2=256(%esp) movdqa %xmm0,256(%esp) # qhasm: z3 = x3 # asm 1: movdqa z3=int6464#1 # asm 2: movdqa z3=%xmm0 movdqa 32(%esp),%xmm0 # qhasm: z4 = z3[0,0,0,0] # asm 1: pshufd $0x00,z4=int6464#2 # asm 2: pshufd $0x00,z4=%xmm1 pshufd $0x00,%xmm0,%xmm1 # qhasm: z14 = z3[2,2,2,2] # asm 1: pshufd $0xaa,z14=int6464#3 # asm 2: pshufd $0xaa,z14=%xmm2 pshufd $0xaa,%xmm0,%xmm2 # qhasm: z3 = z3[3,3,3,3] # asm 1: pshufd $0xff,z3=int6464#1 # asm 2: pshufd $0xff,z3=%xmm0 pshufd $0xff,%xmm0,%xmm0 # qhasm: orig4 = z4 # asm 1: movdqa orig4=stack128#16 # asm 2: movdqa orig4=272(%esp) movdqa %xmm1,272(%esp) # qhasm: orig14 = z14 # asm 1: movdqa orig14=stack128#17 # asm 2: movdqa orig14=288(%esp) movdqa %xmm2,288(%esp) # qhasm: orig3 = z3 # asm 1: movdqa orig3=stack128#18 # asm 2: movdqa orig3=304(%esp) movdqa %xmm0,304(%esp) # qhasm: bytesatleast256: ._bytesatleast256: # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int32#2 # asm 2: movl in8=%ecx movl 80(%esp),%ecx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int32#3 # asm 2: movl 4+in9=%edx movl 4+32(%esp),%edx # qhasm: ((uint32 *) &orig8)[0] = in8 # asm 1: movl orig8=stack128#19 # asm 2: movl orig8=320(%esp) movl %ecx,320(%esp) # qhasm: ((uint32 *) &orig9)[0] = in9 # asm 1: movl orig9=stack128#20 # asm 2: movl orig9=336(%esp) movl %edx,336(%esp) # qhasm: carry? 
in8 += 1 # asm 1: add $1,x2=stack128#4 # asm 2: movl x2=80(%esp) movl %ecx,80(%esp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl bytes_stack=stack32#7 # asm 2: movl bytes_stack=24(%esp) movl %eax,24(%esp) # qhasm: i = 12 # asm 1: mov $12,>i=int32#1 # asm 2: mov $12,>i=%eax mov $12,%eax # qhasm: z5 = orig5 # asm 1: movdqa z5=int6464#1 # asm 2: movdqa z5=%xmm0 movdqa 96(%esp),%xmm0 # qhasm: z10 = orig10 # asm 1: movdqa z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 112(%esp),%xmm1 # qhasm: z15 = orig15 # asm 1: movdqa z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 128(%esp),%xmm2 # qhasm: z14 = orig14 # asm 1: movdqa z14=int6464#4 # asm 2: movdqa z14=%xmm3 movdqa 288(%esp),%xmm3 # qhasm: z3 = orig3 # asm 1: movdqa z3=int6464#5 # asm 2: movdqa z3=%xmm4 movdqa 304(%esp),%xmm4 # qhasm: z6 = orig6 # asm 1: movdqa z6=int6464#6 # asm 2: movdqa z6=%xmm5 movdqa 160(%esp),%xmm5 # qhasm: z11 = orig11 # asm 1: movdqa z11=int6464#7 # asm 2: movdqa z11=%xmm6 movdqa 176(%esp),%xmm6 # qhasm: z1 = orig1 # asm 1: movdqa z1=int6464#8 # asm 2: movdqa z1=%xmm7 movdqa 208(%esp),%xmm7 # qhasm: z5_stack = z5 # asm 1: movdqa z5_stack=stack128#21 # asm 2: movdqa z5_stack=352(%esp) movdqa %xmm0,352(%esp) # qhasm: z10_stack = z10 # asm 1: movdqa z10_stack=stack128#22 # asm 2: movdqa z10_stack=368(%esp) movdqa %xmm1,368(%esp) # qhasm: z15_stack = z15 # asm 1: movdqa z15_stack=stack128#23 # asm 2: movdqa z15_stack=384(%esp) movdqa %xmm2,384(%esp) # qhasm: z14_stack = z14 # asm 1: movdqa z14_stack=stack128#24 # asm 2: movdqa z14_stack=400(%esp) movdqa %xmm3,400(%esp) # qhasm: z3_stack = z3 # asm 1: movdqa z3_stack=stack128#25 # asm 2: movdqa z3_stack=416(%esp) movdqa %xmm4,416(%esp) # qhasm: z6_stack = z6 # asm 1: movdqa z6_stack=stack128#26 # asm 2: movdqa z6_stack=432(%esp) movdqa %xmm5,432(%esp) # qhasm: z11_stack = z11 # asm 1: movdqa z11_stack=stack128#27 # asm 2: movdqa z11_stack=448(%esp) movdqa %xmm6,448(%esp) # qhasm: z1_stack = z1 # asm 1: movdqa z1_stack=stack128#28 # asm 2: movdqa z1_stack=464(%esp) movdqa %xmm7,464(%esp) # qhasm: z7 = orig7 # asm 1: movdqa z7=int6464#5 # asm 2: movdqa z7=%xmm4 movdqa 224(%esp),%xmm4 # qhasm: z13 = orig13 # asm 1: movdqa z13=int6464#6 # asm 2: movdqa z13=%xmm5 movdqa 240(%esp),%xmm5 # qhasm: z2 = orig2 # asm 1: movdqa z2=int6464#7 # asm 2: movdqa z2=%xmm6 movdqa 256(%esp),%xmm6 # qhasm: z9 = orig9 # asm 1: movdqa z9=int6464#8 # asm 2: movdqa z9=%xmm7 movdqa 336(%esp),%xmm7 # qhasm: p = orig0 # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 144(%esp),%xmm0 # qhasm: t = orig12 # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 192(%esp),%xmm2 # qhasm: q = orig4 # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 272(%esp),%xmm3 # qhasm: r = orig8 # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 320(%esp),%xmm1 # qhasm: z7_stack = z7 # asm 1: movdqa z7_stack=stack128#29 # asm 2: movdqa z7_stack=480(%esp) movdqa %xmm4,480(%esp) # qhasm: z13_stack = z13 # asm 1: movdqa z13_stack=stack128#30 # asm 2: movdqa z13_stack=496(%esp) movdqa %xmm5,496(%esp) # qhasm: z2_stack = z2 # asm 1: movdqa z2_stack=stack128#31 # asm 2: movdqa z2_stack=512(%esp) movdqa %xmm6,512(%esp) # qhasm: z9_stack = z9 # asm 1: movdqa z9_stack=stack128#32 # asm 2: movdqa z9_stack=528(%esp) movdqa %xmm7,528(%esp) # qhasm: z0_stack = p # asm 1: movdqa z0_stack=stack128#33 # asm 2: movdqa z0_stack=544(%esp) movdqa %xmm0,544(%esp) # qhasm: z12_stack = t # asm 1: movdqa z12_stack=stack128#34 # asm 2: movdqa z12_stack=560(%esp) movdqa %xmm2,560(%esp) # qhasm: z4_stack = q # asm 1: 
movdqa z4_stack=stack128#35 # asm 2: movdqa z4_stack=576(%esp) movdqa %xmm3,576(%esp) # qhasm: z8_stack = r # asm 1: movdqa z8_stack=stack128#36 # asm 2: movdqa z8_stack=592(%esp) movdqa %xmm1,592(%esp) # qhasm: mainloop1: ._mainloop1: # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s = t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z4_stack=stack128#33 # asm 2: movdqa z4_stack=544(%esp) movdqa %xmm3,544(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z8_stack=stack128#34 # asm 2: movdqa z8_stack=560(%esp) movdqa %xmm1,560(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 464(%esp),%xmm2 # qhasm: mp = z5_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 352(%esp),%xmm4 # qhasm: mq = z9_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 528(%esp),%xmm3 # qhasm: mr = z13_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 496(%esp),%xmm5 # qhasm: z12_stack = s # asm 1: movdqa z12_stack=stack128#30 # asm 2: movdqa z12_stack=496(%esp) movdqa %xmm6,496(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z0_stack=stack128#21 # asm 2: movdqa z0_stack=352(%esp) movdqa %xmm0,352(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z9_stack=stack128#32 # asm 2: movdqa z9_stack=528(%esp) movdqa %xmm3,528(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z13_stack=stack128#35 # asm 2: movdqa z13_stack=576(%esp) movdqa %xmm5,576(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 432(%esp),%xmm2 # qhasm: p = z10_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 368(%esp),%xmm0 # qhasm: q = z14_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 400(%esp),%xmm3 # qhasm: r = z2_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 512(%esp),%xmm1 # qhasm: z1_stack = ms # asm 1: movdqa z1_stack=stack128#22 # asm 2: movdqa z1_stack=368(%esp) movdqa %xmm6,368(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z5_stack=stack128#24 # asm 2: movdqa z5_stack=400(%esp) movdqa %xmm4,400(%esp) # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s 
= t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z14_stack=stack128#36 # asm 2: movdqa z14_stack=592(%esp) movdqa %xmm3,592(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z2_stack=stack128#26 # asm 2: movdqa z2_stack=432(%esp) movdqa %xmm1,432(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 448(%esp),%xmm2 # qhasm: mp = z15_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 384(%esp),%xmm4 # qhasm: mq = z3_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 416(%esp),%xmm3 # qhasm: mr = z7_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 480(%esp),%xmm5 # qhasm: z6_stack = s # asm 1: movdqa z6_stack=stack128#23 # asm 2: movdqa z6_stack=384(%esp) movdqa %xmm6,384(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z10_stack=stack128#27 # asm 2: movdqa z10_stack=448(%esp) movdqa %xmm0,448(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z3_stack=stack128#25 # asm 2: movdqa z3_stack=416(%esp) movdqa %xmm3,416(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z7_stack=stack128#29 # asm 2: movdqa z7_stack=480(%esp) movdqa %xmm5,480(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 416(%esp),%xmm2 # qhasm: p = z0_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 352(%esp),%xmm0 # qhasm: q = z1_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 368(%esp),%xmm3 # qhasm: r = z2_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 432(%esp),%xmm1 # qhasm: z11_stack = ms # asm 1: movdqa z11_stack=stack128#21 # asm 2: movdqa z11_stack=352(%esp) movdqa %xmm6,352(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z15_stack=stack128#22 # asm 2: movdqa z15_stack=368(%esp) movdqa %xmm4,368(%esp) # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s = t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z1_stack=stack128#28 # asm 2: movdqa z1_stack=464(%esp) movdqa %xmm3,464(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # 
asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z2_stack=stack128#31 # asm 2: movdqa z2_stack=512(%esp) movdqa %xmm1,512(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 544(%esp),%xmm2 # qhasm: mp = z5_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 400(%esp),%xmm4 # qhasm: mq = z6_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 384(%esp),%xmm3 # qhasm: mr = z7_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 480(%esp),%xmm5 # qhasm: z3_stack = s # asm 1: movdqa z3_stack=stack128#25 # asm 2: movdqa z3_stack=416(%esp) movdqa %xmm6,416(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z0_stack=stack128#33 # asm 2: movdqa z0_stack=544(%esp) movdqa %xmm0,544(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z6_stack=stack128#26 # asm 2: movdqa z6_stack=432(%esp) movdqa %xmm3,432(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z7_stack=stack128#29 # asm 2: movdqa z7_stack=480(%esp) movdqa %xmm5,480(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 528(%esp),%xmm2 # qhasm: p = z10_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 448(%esp),%xmm0 # qhasm: q = z11_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 352(%esp),%xmm3 # qhasm: r = z8_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 560(%esp),%xmm1 # qhasm: z4_stack = ms # asm 1: movdqa z4_stack=stack128#34 # asm 2: movdqa z4_stack=560(%esp) movdqa %xmm6,560(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z5_stack=stack128#21 # asm 2: movdqa z5_stack=352(%esp) movdqa %xmm4,352(%esp) # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s = t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z11_stack=stack128#27 # asm 2: movdqa z11_stack=448(%esp) movdqa %xmm3,448(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z8_stack=stack128#37 # asm 2: movdqa z8_stack=608(%esp) movdqa %xmm1,608(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 
movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 592(%esp),%xmm2 # qhasm: mp = z15_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 368(%esp),%xmm4 # qhasm: mq = z12_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 496(%esp),%xmm3 # qhasm: mr = z13_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 576(%esp),%xmm5 # qhasm: z9_stack = s # asm 1: movdqa z9_stack=stack128#32 # asm 2: movdqa z9_stack=528(%esp) movdqa %xmm6,528(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z10_stack=stack128#22 # asm 2: movdqa z10_stack=368(%esp) movdqa %xmm0,368(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z12_stack=stack128#35 # asm 2: movdqa z12_stack=576(%esp) movdqa %xmm3,576(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z13_stack=stack128#30 # asm 2: movdqa z13_stack=496(%esp) movdqa %xmm5,496(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 576(%esp),%xmm2 # qhasm: p = z0_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 544(%esp),%xmm0 # qhasm: q = z4_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 560(%esp),%xmm3 # qhasm: r = z8_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 608(%esp),%xmm1 # qhasm: z14_stack = ms # asm 1: movdqa z14_stack=stack128#24 # asm 2: movdqa z14_stack=400(%esp) movdqa %xmm6,400(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z15_stack=stack128#23 # asm 2: movdqa z15_stack=384(%esp) movdqa %xmm4,384(%esp) # qhasm: unsigned>? 
i -= 2 # asm 1: sub $2, ja ._mainloop1 # qhasm: out = out_stack # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 20(%esp),%edi # qhasm: z0 = z0_stack # asm 1: movdqa z0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 544(%esp),%xmm0 # qhasm: z1 = z1_stack # asm 1: movdqa z1=int6464#2 # asm 2: movdqa z1=%xmm1 movdqa 464(%esp),%xmm1 # qhasm: z2 = z2_stack # asm 1: movdqa z2=int6464#3 # asm 2: movdqa z2=%xmm2 movdqa 512(%esp),%xmm2 # qhasm: z3 = z3_stack # asm 1: movdqa z3=int6464#4 # asm 2: movdqa z3=%xmm3 movdqa 416(%esp),%xmm3 # qhasm: uint32323232 z0 += orig0 # asm 1: paddd in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: in0 ^= *(uint32 *) (m + 192) # asm 1: xorl 192(z4=int6464#1 # asm 2: movdqa z4=%xmm0 movdqa 560(%esp),%xmm0 # qhasm: z5 = z5_stack # asm 1: movdqa z5=int6464#2 # asm 2: movdqa z5=%xmm1 movdqa 352(%esp),%xmm1 # qhasm: z6 = z6_stack # asm 1: movdqa z6=int6464#3 # asm 2: movdqa z6=%xmm2 movdqa 432(%esp),%xmm2 # qhasm: z7 = z7_stack # asm 1: movdqa z7=int6464#4 # asm 2: movdqa z7=%xmm3 movdqa 480(%esp),%xmm3 # qhasm: uint32323232 z4 += orig4 # asm 1: paddd in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # 
asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: in4 ^= *(uint32 *) (m + 208) # asm 1: xorl 208(z8=int6464#1 # asm 2: movdqa z8=%xmm0 movdqa 608(%esp),%xmm0 # qhasm: z9 = z9_stack # asm 1: movdqa z9=int6464#2 # asm 2: movdqa z9=%xmm1 movdqa 528(%esp),%xmm1 # qhasm: z10 = z10_stack # asm 1: movdqa z10=int6464#3 # asm 2: movdqa z10=%xmm2 movdqa 368(%esp),%xmm2 # qhasm: z11 = z11_stack # asm 1: movdqa z11=int6464#4 # asm 2: movdqa z11=%xmm3 movdqa 448(%esp),%xmm3 # qhasm: uint32323232 z8 += orig8 # asm 1: paddd in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: in8 ^= *(uint32 *) (m + 224) # asm 1: xorl 224(z12=int6464#1 # asm 2: movdqa z12=%xmm0 movdqa 576(%esp),%xmm0 # qhasm: z13 = z13_stack # asm 1: movdqa z13=int6464#2 # asm 2: movdqa z13=%xmm1 movdqa 496(%esp),%xmm1 # qhasm: z14 = z14_stack # asm 1: movdqa z14=int6464#3 # asm 2: movdqa z14=%xmm2 movdqa 400(%esp),%xmm2 # qhasm: z15 = z15_stack # asm 1: movdqa z15=int6464#4 # asm 2: movdqa z15=%xmm3 movdqa 384(%esp),%xmm3 # qhasm: uint32323232 z12 += orig12 # asm 1: paddd in12=int32#1 # asm 2: movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int32#1 # asm 2: movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int32#1 # asm 2: movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int32#1 # asm 2: 
movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: in12 ^= *(uint32 *) (m + 240) # asm 1: xorl 240(bytes=int32#1 # asm 2: movl bytes=%eax movl 24(%esp),%eax # qhasm: bytes -= 256 # asm 1: sub $256,out_stack=stack32#6 # asm 2: movl out_stack=20(%esp) movl %edi,20(%esp) # qhasm: unsigned? bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: bytesbetween1and255: ._bytesbetween1and255: # qhasm: unsignedctarget=stack32#6 # asm 2: movl ctarget=20(%esp) movl %edi,20(%esp) # qhasm: out = &tmp # asm 1: leal out=int32#6 # asm 2: leal out=%edi leal 640(%esp),%edi # qhasm: i = bytes # asm 1: mov i=int32#2 # asm 2: mov i=%ecx mov %eax,%ecx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # qhasm: out = &tmp # asm 1: leal out=int32#6 # asm 2: leal out=%edi leal 640(%esp),%edi # qhasm: m = &tmp # asm 1: leal m=int32#5 # asm 2: leal m=%esi leal 640(%esp),%esi # comment:fp stack unchanged by fallthrough # qhasm: nocopy: ._nocopy: # qhasm: bytes_stack = bytes # asm 1: movl bytes_stack=stack32#7 # asm 2: movl bytes_stack=24(%esp) movl %eax,24(%esp) # qhasm: diag0 = x0 # asm 1: movdqa diag0=int6464#1 # asm 2: movdqa diag0=%xmm0 movdqa 64(%esp),%xmm0 # qhasm: diag1 = x1 # asm 1: movdqa diag1=int6464#2 # asm 2: movdqa diag1=%xmm1 movdqa 48(%esp),%xmm1 # qhasm: diag2 = x2 # asm 1: movdqa diag2=int6464#3 # asm 2: movdqa diag2=%xmm2 movdqa 80(%esp),%xmm2 # qhasm: diag3 = x3 # asm 1: movdqa diag3=int6464#4 # asm 2: movdqa diag3=%xmm3 movdqa 32(%esp),%xmm3 # qhasm: a0 = diag1 # asm 1: movdqa a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: i = 12 # asm 1: mov $12,>i=int32#1 # asm 2: mov $12,>i=%eax mov $12,%eax # qhasm: mainloop2: ._mainloop2: # qhasm: uint32323232 a0 += diag0 # asm 1: paddd a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 # asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld 
$19,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 # asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,? 
i -= 4 # asm 1: sub $4,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,b0=int6464#8,>b0=int6464#8 # asm 2: pxor >b0=%xmm7,>b0=%xmm7 pxor %xmm7,%xmm7 # qhasm: uint32323232 b7 >>= 14 # asm 1: psrld $14, ja ._mainloop2 # qhasm: uint32323232 diag0 += x0 # asm 1: paddd in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in12 = diag1 # asm 1: movd in12=int32#2 # asm 2: movd in12=%ecx movd %xmm1,%ecx # qhasm: in8 = diag2 # asm 1: movd in8=int32#3 # asm 2: movd in8=%edx movd %xmm2,%edx # qhasm: in4 = diag3 # asm 1: movd in4=int32#4 # asm 2: movd in4=%ebx movd %xmm3,%ebx # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in5=int32#1 # asm 2: movd in5=%eax movd %xmm0,%eax # qhasm: in1 = diag1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in13 = diag2 # asm 1: movd in13=int32#3 # asm 2: movd in13=%edx movd %xmm2,%edx # qhasm: in9 = diag3 # asm 1: movd in9=int32#4 # asm 2: movd in9=%ebx movd %xmm3,%ebx # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in10=int32#1 # asm 2: movd in10=%eax movd %xmm0,%eax # qhasm: in6 = diag1 # asm 1: movd in6=int32#2 # asm 2: movd in6=%ecx movd %xmm1,%ecx # qhasm: in2 = diag2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in14 = diag3 # asm 1: movd in14=int32#4 # asm 2: movd in14=%ebx movd %xmm3,%ebx # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in15=int32#1 # asm 2: movd in15=%eax movd %xmm0,%eax # qhasm: in11 = diag1 # asm 1: movd in11=int32#2 # asm 2: movd in11=%ecx movd %xmm1,%ecx # qhasm: in7 = diag2 # asm 1: movd in7=int32#3 # asm 2: movd in7=%edx movd %xmm2,%edx # qhasm: in3 = diag3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: in15 ^= *(uint32 *) (m + 60) # asm 1: xorl 60(bytes=int32#1 # asm 2: movl bytes=%eax movl 24(%esp),%eax # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int32#2 # asm 2: movl in8=%ecx movl 80(%esp),%ecx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int32#3 # asm 2: movl 4+in9=%edx movl 4+32(%esp),%edx # qhasm: carry? in8 += 1 # asm 1: add $1,x2=stack128#4 # asm 2: movl x2=80(%esp) movl %ecx,80(%esp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl ? 
unsigned ja ._bytesatleast65 # comment:fp stack unchanged by jump # qhasm: goto bytesatleast64 if !unsigned< jae ._bytesatleast64 # qhasm: m = out # asm 1: mov m=int32#5 # asm 2: mov m=%esi mov %edi,%esi # qhasm: out = ctarget # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 20(%esp),%edi # qhasm: i = bytes # asm 1: mov i=int32#2 # asm 2: mov i=%ecx mov %eax,%ecx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # comment:fp stack unchanged by fallthrough # qhasm: bytesatleast64: ._bytesatleast64: # comment:fp stack unchanged by fallthrough # qhasm: done: ._done: # qhasm: eax = eax_stack # asm 1: movl eax=int32#1 # asm 2: movl eax=%eax movl 0(%esp),%eax # qhasm: ebx = ebx_stack # asm 1: movl ebx=int32#4 # asm 2: movl ebx=%ebx movl 4(%esp),%ebx # qhasm: esi = esi_stack # asm 1: movl esi=int32#5 # asm 2: movl esi=%esi movl 8(%esp),%esi # qhasm: edi = edi_stack # asm 1: movl edi=int32#6 # asm 2: movl edi=%edi movl 12(%esp),%edi # qhasm: ebp = ebp_stack # asm 1: movl ebp=int32#7 # asm 2: movl ebp=%ebp movl 16(%esp),%ebp # qhasm: leave add %eax,%esp xor %eax,%eax ret # qhasm: bytesatleast65: ._bytesatleast65: # qhasm: bytes -= 64 # asm 1: sub $64,r11_stack=stack64#1 # asm 2: movq r11_stack=352(%rsp) movq %r11,352(%rsp) # qhasm: r12_stack = r12_caller # asm 1: movq r12_stack=stack64#2 # asm 2: movq r12_stack=360(%rsp) movq %r12,360(%rsp) # qhasm: r13_stack = r13_caller # asm 1: movq r13_stack=stack64#3 # asm 2: movq r13_stack=368(%rsp) movq %r13,368(%rsp) # qhasm: r14_stack = r14_caller # asm 1: movq r14_stack=stack64#4 # asm 2: movq r14_stack=376(%rsp) movq %r14,376(%rsp) # qhasm: r15_stack = r15_caller # asm 1: movq r15_stack=stack64#5 # asm 2: movq r15_stack=384(%rsp) movq %r15,384(%rsp) # qhasm: rbx_stack = rbx_caller # asm 1: movq rbx_stack=stack64#6 # asm 2: movq rbx_stack=392(%rsp) movq %rbx,392(%rsp) # qhasm: rbp_stack = rbp_caller # asm 1: movq rbp_stack=stack64#7 # asm 2: movq rbp_stack=400(%rsp) movq %rbp,400(%rsp) # qhasm: bytes = arg2 # asm 1: mov bytes=int64#6 # asm 2: mov bytes=%r9 mov %rsi,%r9 # qhasm: out = arg1 # asm 1: mov out=int64#1 # asm 2: mov out=%rdi mov %rdi,%rdi # qhasm: m = out # asm 1: mov m=int64#2 # asm 2: mov m=%rsi mov %rdi,%rsi # qhasm: iv = arg3 # asm 1: mov iv=int64#3 # asm 2: mov iv=%rdx mov %rdx,%rdx # qhasm: k = arg4 # asm 1: mov k=int64#8 # asm 2: mov k=%r10 mov %rcx,%r10 # qhasm: unsigned>? 
bytes - 0 # asm 1: cmp $0, jbe ._done # qhasm: a = 0 # asm 1: mov $0,>a=int64#7 # asm 2: mov $0,>a=%rax mov $0,%rax # qhasm: i = bytes # asm 1: mov i=int64#4 # asm 2: mov i=%rcx mov %r9,%rcx # qhasm: while (i) { *out++ = a; --i } rep stosb # qhasm: out -= bytes # asm 1: sub r11_stack=stack64#1 # asm 2: movq r11_stack=352(%rsp) movq %r11,352(%rsp) # qhasm: r12_stack = r12_caller # asm 1: movq r12_stack=stack64#2 # asm 2: movq r12_stack=360(%rsp) movq %r12,360(%rsp) # qhasm: r13_stack = r13_caller # asm 1: movq r13_stack=stack64#3 # asm 2: movq r13_stack=368(%rsp) movq %r13,368(%rsp) # qhasm: r14_stack = r14_caller # asm 1: movq r14_stack=stack64#4 # asm 2: movq r14_stack=376(%rsp) movq %r14,376(%rsp) # qhasm: r15_stack = r15_caller # asm 1: movq r15_stack=stack64#5 # asm 2: movq r15_stack=384(%rsp) movq %r15,384(%rsp) # qhasm: rbx_stack = rbx_caller # asm 1: movq rbx_stack=stack64#6 # asm 2: movq rbx_stack=392(%rsp) movq %rbx,392(%rsp) # qhasm: rbp_stack = rbp_caller # asm 1: movq rbp_stack=stack64#7 # asm 2: movq rbp_stack=400(%rsp) movq %rbp,400(%rsp) # qhasm: out = arg1 # asm 1: mov out=int64#1 # asm 2: mov out=%rdi mov %rdi,%rdi # qhasm: m = arg2 # asm 1: mov m=int64#2 # asm 2: mov m=%rsi mov %rsi,%rsi # qhasm: bytes = arg3 # asm 1: mov bytes=int64#6 # asm 2: mov bytes=%r9 mov %rdx,%r9 # qhasm: iv = arg4 # asm 1: mov iv=int64#3 # asm 2: mov iv=%rdx mov %rcx,%rdx # qhasm: k = arg5 # asm 1: mov k=int64#8 # asm 2: mov k=%r10 mov %r8,%r10 # qhasm: unsigned>? bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: start: ._start: # qhasm: in12 = *(uint32 *) (k + 20) # asm 1: movl 20(in12=int64#4d # asm 2: movl 20(in12=%ecx movl 20(%r10),%ecx # qhasm: in1 = *(uint32 *) (k + 0) # asm 1: movl 0(in1=int64#5d # asm 2: movl 0(in1=%r8d movl 0(%r10),%r8d # qhasm: in6 = *(uint32 *) (iv + 0) # asm 1: movl 0(in6=int64#7d # asm 2: movl 0(in6=%eax movl 0(%rdx),%eax # qhasm: in11 = *(uint32 *) (k + 16) # asm 1: movl 16(in11=int64#9d # asm 2: movl 16(in11=%r11d movl 16(%r10),%r11d # qhasm: ((uint32 *)&x1)[0] = in12 # asm 1: movl x1=stack128#1 # asm 2: movl x1=0(%rsp) movl %ecx,0(%rsp) # qhasm: ((uint32 *)&x1)[1] = in1 # asm 1: movl in8=int64#4 # asm 2: mov $0,>in8=%rcx mov $0,%rcx # qhasm: in13 = *(uint32 *) (k + 24) # asm 1: movl 24(in13=int64#5d # asm 2: movl 24(in13=%r8d movl 24(%r10),%r8d # qhasm: in2 = *(uint32 *) (k + 4) # asm 1: movl 4(in2=int64#7d # asm 2: movl 4(in2=%eax movl 4(%r10),%eax # qhasm: in7 = *(uint32 *) (iv + 4) # asm 1: movl 4(in7=int64#3d # asm 2: movl 4(in7=%edx movl 4(%rdx),%edx # qhasm: ((uint32 *)&x2)[0] = in8 # asm 1: movl x2=stack128#2 # asm 2: movl x2=16(%rsp) movl %ecx,16(%rsp) # qhasm: ((uint32 *)&x2)[1] = in13 # asm 1: movl in4=int64#3d # asm 2: movl 12(in4=%edx movl 12(%r10),%edx # qhasm: in9 = 0 # asm 1: mov $0,>in9=int64#4 # asm 2: mov $0,>in9=%rcx mov $0,%rcx # qhasm: in14 = *(uint32 *) (k + 28) # asm 1: movl 28(in14=int64#5d # asm 2: movl 28(in14=%r8d movl 28(%r10),%r8d # qhasm: in3 = *(uint32 *) (k + 8) # asm 1: movl 8(in3=int64#7d # asm 2: movl 8(in3=%eax movl 8(%r10),%eax # qhasm: ((uint32 *)&x3)[0] = in4 # asm 1: movl x3=stack128#3 # asm 2: movl x3=32(%rsp) movl %edx,32(%rsp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl in0=int64#3 # asm 2: mov $1634760805,>in0=%rdx mov $1634760805,%rdx # qhasm: in5 = 857760878 # asm 1: mov $857760878,>in5=int64#4 # asm 2: mov $857760878,>in5=%rcx mov $857760878,%rcx # qhasm: in10 = 2036477234 # asm 1: mov $2036477234,>in10=int64#5 # asm 2: mov $2036477234,>in10=%r8 mov $2036477234,%r8 # qhasm: 
in15 = 1797285236 # asm 1: mov $1797285236,>in15=int64#7 # asm 2: mov $1797285236,>in15=%rax mov $1797285236,%rax # qhasm: ((uint32 *)&x0)[0] = in0 # asm 1: movl x0=stack128#4 # asm 2: movl x0=48(%rsp) movl %edx,48(%rsp) # qhasm: ((uint32 *)&x0)[1] = in5 # asm 1: movl z0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 48(%rsp),%xmm0 # qhasm: z5 = z0[1,1,1,1] # asm 1: pshufd $0x55,z5=int6464#2 # asm 2: pshufd $0x55,z5=%xmm1 pshufd $0x55,%xmm0,%xmm1 # qhasm: z10 = z0[2,2,2,2] # asm 1: pshufd $0xaa,z10=int6464#3 # asm 2: pshufd $0xaa,z10=%xmm2 pshufd $0xaa,%xmm0,%xmm2 # qhasm: z15 = z0[3,3,3,3] # asm 1: pshufd $0xff,z15=int6464#4 # asm 2: pshufd $0xff,z15=%xmm3 pshufd $0xff,%xmm0,%xmm3 # qhasm: z0 = z0[0,0,0,0] # asm 1: pshufd $0x00,z0=int6464#1 # asm 2: pshufd $0x00,z0=%xmm0 pshufd $0x00,%xmm0,%xmm0 # qhasm: orig5 = z5 # asm 1: movdqa orig5=stack128#5 # asm 2: movdqa orig5=64(%rsp) movdqa %xmm1,64(%rsp) # qhasm: orig10 = z10 # asm 1: movdqa orig10=stack128#6 # asm 2: movdqa orig10=80(%rsp) movdqa %xmm2,80(%rsp) # qhasm: orig15 = z15 # asm 1: movdqa orig15=stack128#7 # asm 2: movdqa orig15=96(%rsp) movdqa %xmm3,96(%rsp) # qhasm: orig0 = z0 # asm 1: movdqa orig0=stack128#8 # asm 2: movdqa orig0=112(%rsp) movdqa %xmm0,112(%rsp) # qhasm: z1 = x1 # asm 1: movdqa z1=int6464#1 # asm 2: movdqa z1=%xmm0 movdqa 0(%rsp),%xmm0 # qhasm: z6 = z1[2,2,2,2] # asm 1: pshufd $0xaa,z6=int6464#2 # asm 2: pshufd $0xaa,z6=%xmm1 pshufd $0xaa,%xmm0,%xmm1 # qhasm: z11 = z1[3,3,3,3] # asm 1: pshufd $0xff,z11=int6464#3 # asm 2: pshufd $0xff,z11=%xmm2 pshufd $0xff,%xmm0,%xmm2 # qhasm: z12 = z1[0,0,0,0] # asm 1: pshufd $0x00,z12=int6464#4 # asm 2: pshufd $0x00,z12=%xmm3 pshufd $0x00,%xmm0,%xmm3 # qhasm: z1 = z1[1,1,1,1] # asm 1: pshufd $0x55,z1=int6464#1 # asm 2: pshufd $0x55,z1=%xmm0 pshufd $0x55,%xmm0,%xmm0 # qhasm: orig6 = z6 # asm 1: movdqa orig6=stack128#9 # asm 2: movdqa orig6=128(%rsp) movdqa %xmm1,128(%rsp) # qhasm: orig11 = z11 # asm 1: movdqa orig11=stack128#10 # asm 2: movdqa orig11=144(%rsp) movdqa %xmm2,144(%rsp) # qhasm: orig12 = z12 # asm 1: movdqa orig12=stack128#11 # asm 2: movdqa orig12=160(%rsp) movdqa %xmm3,160(%rsp) # qhasm: orig1 = z1 # asm 1: movdqa orig1=stack128#12 # asm 2: movdqa orig1=176(%rsp) movdqa %xmm0,176(%rsp) # qhasm: z2 = x2 # asm 1: movdqa z2=int6464#1 # asm 2: movdqa z2=%xmm0 movdqa 16(%rsp),%xmm0 # qhasm: z7 = z2[3,3,3,3] # asm 1: pshufd $0xff,z7=int6464#2 # asm 2: pshufd $0xff,z7=%xmm1 pshufd $0xff,%xmm0,%xmm1 # qhasm: z13 = z2[1,1,1,1] # asm 1: pshufd $0x55,z13=int6464#3 # asm 2: pshufd $0x55,z13=%xmm2 pshufd $0x55,%xmm0,%xmm2 # qhasm: z2 = z2[2,2,2,2] # asm 1: pshufd $0xaa,z2=int6464#1 # asm 2: pshufd $0xaa,z2=%xmm0 pshufd $0xaa,%xmm0,%xmm0 # qhasm: orig7 = z7 # asm 1: movdqa orig7=stack128#13 # asm 2: movdqa orig7=192(%rsp) movdqa %xmm1,192(%rsp) # qhasm: orig13 = z13 # asm 1: movdqa orig13=stack128#14 # asm 2: movdqa orig13=208(%rsp) movdqa %xmm2,208(%rsp) # qhasm: orig2 = z2 # asm 1: movdqa orig2=stack128#15 # asm 2: movdqa orig2=224(%rsp) movdqa %xmm0,224(%rsp) # qhasm: z3 = x3 # asm 1: movdqa z3=int6464#1 # asm 2: movdqa z3=%xmm0 movdqa 32(%rsp),%xmm0 # qhasm: z4 = z3[0,0,0,0] # asm 1: pshufd $0x00,z4=int6464#2 # asm 2: pshufd $0x00,z4=%xmm1 pshufd $0x00,%xmm0,%xmm1 # qhasm: z14 = z3[2,2,2,2] # asm 1: pshufd $0xaa,z14=int6464#3 # asm 2: pshufd $0xaa,z14=%xmm2 pshufd $0xaa,%xmm0,%xmm2 # qhasm: z3 = z3[3,3,3,3] # asm 1: pshufd $0xff,z3=int6464#1 # asm 2: pshufd $0xff,z3=%xmm0 pshufd $0xff,%xmm0,%xmm0 # qhasm: orig4 = z4 # asm 1: movdqa orig4=stack128#16 # asm 2: movdqa 
orig4=240(%rsp) movdqa %xmm1,240(%rsp) # qhasm: orig14 = z14 # asm 1: movdqa orig14=stack128#17 # asm 2: movdqa orig14=256(%rsp) movdqa %xmm2,256(%rsp) # qhasm: orig3 = z3 # asm 1: movdqa orig3=stack128#18 # asm 2: movdqa orig3=272(%rsp) movdqa %xmm0,272(%rsp) # qhasm: bytesatleast256: ._bytesatleast256: # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int64#3d # asm 2: movl in8=%edx movl 16(%rsp),%edx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int64#4d # asm 2: movl 4+in9=%ecx movl 4+32(%rsp),%ecx # qhasm: ((uint32 *) &orig8)[0] = in8 # asm 1: movl orig8=stack128#19 # asm 2: movl orig8=288(%rsp) movl %edx,288(%rsp) # qhasm: ((uint32 *) &orig9)[0] = in9 # asm 1: movl orig9=stack128#20 # asm 2: movl orig9=304(%rsp) movl %ecx,304(%rsp) # qhasm: in8 += 1 # asm 1: add $1,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,in9=int64#4 # asm 2: mov in9=%rcx mov %rdx,%rcx # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,x2=stack128#2 # asm 2: movl x2=16(%rsp) movl %edx,16(%rsp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl bytes_backup=stack64#8 # asm 2: movq bytes_backup=408(%rsp) movq %r9,408(%rsp) # qhasm: i = 8 # asm 1: mov $8,>i=int64#3 # asm 2: mov $8,>i=%rdx mov $8,%rdx # qhasm: z5 = orig5 # asm 1: movdqa z5=int6464#1 # asm 2: movdqa z5=%xmm0 movdqa 64(%rsp),%xmm0 # qhasm: z10 = orig10 # asm 1: movdqa z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 80(%rsp),%xmm1 # qhasm: z15 = orig15 # asm 1: movdqa z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 96(%rsp),%xmm2 # qhasm: z14 = orig14 # asm 1: movdqa z14=int6464#4 # asm 2: movdqa z14=%xmm3 movdqa 256(%rsp),%xmm3 # qhasm: z3 = orig3 # asm 1: movdqa z3=int6464#5 # asm 2: movdqa z3=%xmm4 movdqa 272(%rsp),%xmm4 # qhasm: z6 = orig6 # asm 1: movdqa z6=int6464#6 # asm 2: movdqa z6=%xmm5 movdqa 128(%rsp),%xmm5 # qhasm: z11 = orig11 # asm 1: movdqa z11=int6464#7 # asm 2: movdqa z11=%xmm6 movdqa 144(%rsp),%xmm6 # qhasm: z1 = orig1 # asm 1: movdqa z1=int6464#8 # asm 2: movdqa z1=%xmm7 movdqa 176(%rsp),%xmm7 # qhasm: z7 = orig7 # asm 1: movdqa z7=int6464#9 # asm 2: movdqa z7=%xmm8 movdqa 192(%rsp),%xmm8 # qhasm: z13 = orig13 # asm 1: movdqa z13=int6464#10 # asm 2: movdqa z13=%xmm9 movdqa 208(%rsp),%xmm9 # qhasm: z2 = orig2 # asm 1: movdqa z2=int6464#11 # asm 2: movdqa z2=%xmm10 movdqa 224(%rsp),%xmm10 # qhasm: z9 = orig9 # asm 1: movdqa z9=int6464#12 # asm 2: movdqa z9=%xmm11 movdqa 304(%rsp),%xmm11 # qhasm: z0 = orig0 # asm 1: movdqa z0=int6464#13 # asm 2: movdqa z0=%xmm12 movdqa 112(%rsp),%xmm12 # qhasm: z12 = orig12 # asm 1: movdqa z12=int6464#14 # asm 2: movdqa z12=%xmm13 movdqa 160(%rsp),%xmm13 # qhasm: z4 = orig4 # asm 1: movdqa z4=int6464#15 # asm 2: movdqa z4=%xmm14 movdqa 240(%rsp),%xmm14 # qhasm: z8 = orig8 # asm 1: movdqa z8=int6464#16 # asm 2: movdqa z8=%xmm15 movdqa 288(%rsp),%xmm15 # qhasm: mainloop1: ._mainloop1: # qhasm: z10_stack = z10 # asm 1: movdqa z10_stack=stack128#21 # asm 2: movdqa z10_stack=320(%rsp) movdqa %xmm1,320(%rsp) # qhasm: z15_stack = z15 # asm 1: movdqa z15_stack=stack128#22 # asm 2: movdqa z15_stack=336(%rsp) movdqa %xmm2,336(%rsp) # qhasm: y4 = z12 # asm 1: movdqa y4=int6464#2 # asm 2: movdqa y4=%xmm1 movdqa %xmm13,%xmm1 # qhasm: uint32323232 y4 += z0 # asm 1: paddd r4=int6464#3 # asm 2: movdqa r4=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y4 <<= 7 # asm 1: pslld $7,>= 25 # 
asm 1: psrld $25,y9=int6464#2 # asm 2: movdqa y9=%xmm1 movdqa %xmm7,%xmm1 # qhasm: uint32323232 y9 += z5 # asm 1: paddd r9=int6464#3 # asm 2: movdqa r9=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y9 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y8=int6464#2 # asm 2: movdqa y8=%xmm1 movdqa %xmm12,%xmm1 # qhasm: uint32323232 y8 += z4 # asm 1: paddd r8=int6464#3 # asm 2: movdqa r8=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y8 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y13=int6464#2 # asm 2: movdqa y13=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 y13 += z9 # asm 1: paddd r13=int6464#3 # asm 2: movdqa r13=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y13 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y12=int6464#2 # asm 2: movdqa y12=%xmm1 movdqa %xmm14,%xmm1 # qhasm: uint32323232 y12 += z8 # asm 1: paddd r12=int6464#3 # asm 2: movdqa r12=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y12 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y1=int6464#2 # asm 2: movdqa y1=%xmm1 movdqa %xmm11,%xmm1 # qhasm: uint32323232 y1 += z13 # asm 1: paddd r1=int6464#3 # asm 2: movdqa r1=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y1 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y0=int6464#2 # asm 2: movdqa y0=%xmm1 movdqa %xmm15,%xmm1 # qhasm: uint32323232 y0 += z12 # asm 1: paddd r0=int6464#3 # asm 2: movdqa r0=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y0 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 320(%rsp),%xmm1 # qhasm: z0_stack = z0 # asm 1: movdqa z0_stack=stack128#21 # asm 2: movdqa z0_stack=320(%rsp) movdqa %xmm12,320(%rsp) # qhasm: y5 = z13 # asm 1: movdqa y5=int6464#3 # asm 2: movdqa y5=%xmm2 movdqa %xmm9,%xmm2 # qhasm: uint32323232 y5 += z1 # asm 1: paddd r5=int6464#13 # asm 2: movdqa r5=%xmm12 movdqa %xmm2,%xmm12 # qhasm: uint32323232 y5 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,y14=int6464#3 # asm 2: movdqa y14=%xmm2 movdqa %xmm5,%xmm2 # qhasm: uint32323232 y14 += z10 # asm 1: paddd r14=int6464#13 # asm 2: movdqa r14=%xmm12 movdqa %xmm2,%xmm12 # qhasm: uint32323232 y14 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 336(%rsp),%xmm2 # qhasm: z5_stack = z5 # asm 1: movdqa z5_stack=stack128#22 # asm 2: movdqa z5_stack=336(%rsp) movdqa %xmm0,336(%rsp) # qhasm: y3 = z11 # asm 1: movdqa y3=int6464#1 # asm 2: movdqa y3=%xmm0 movdqa %xmm6,%xmm0 # qhasm: uint32323232 y3 += z15 # asm 1: paddd r3=int6464#13 # asm 2: movdqa r3=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y3 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y2=int6464#1 # asm 2: movdqa y2=%xmm0 movdqa %xmm1,%xmm0 # qhasm: uint32323232 y2 += z14 # asm 1: paddd r2=int6464#13 # asm 2: movdqa r2=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y2 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y7=int6464#1 # asm 2: movdqa y7=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 y7 += z3 # asm 1: paddd r7=int6464#13 # asm 2: movdqa r7=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y7 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y6=int6464#1 # asm 2: movdqa y6=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 y6 += z2 # asm 1: paddd r6=int6464#13 # asm 2: movdqa r6=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y11=int6464#1 # asm 2: movdqa y11=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 y11 += z7 # asm 1: paddd r11=int6464#13 # asm 2: movdqa r11=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y11 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld 
$19,y10=int6464#1 # asm 2: movdqa y10=%xmm0 movdqa %xmm10,%xmm0 # qhasm: uint32323232 y10 += z6 # asm 1: paddd r10=int6464#13 # asm 2: movdqa r10=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y10 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 320(%rsp),%xmm0 # qhasm: z10_stack = z10 # asm 1: movdqa z10_stack=stack128#21 # asm 2: movdqa z10_stack=320(%rsp) movdqa %xmm1,320(%rsp) # qhasm: y1 = z3 # asm 1: movdqa y1=int6464#2 # asm 2: movdqa y1=%xmm1 movdqa %xmm4,%xmm1 # qhasm: uint32323232 y1 += z0 # asm 1: paddd r1=int6464#13 # asm 2: movdqa r1=%xmm12 movdqa %xmm1,%xmm12 # qhasm: uint32323232 y1 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y15=int6464#2 # asm 2: movdqa y15=%xmm1 movdqa %xmm8,%xmm1 # qhasm: uint32323232 y15 += z11 # asm 1: paddd r15=int6464#13 # asm 2: movdqa r15=%xmm12 movdqa %xmm1,%xmm12 # qhasm: uint32323232 y15 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z5=int6464#13 # asm 2: movdqa z5=%xmm12 movdqa 336(%rsp),%xmm12 # qhasm: z15_stack = z15 # asm 1: movdqa z15_stack=stack128#22 # asm 2: movdqa z15_stack=336(%rsp) movdqa %xmm2,336(%rsp) # qhasm: y6 = z4 # asm 1: movdqa y6=int6464#2 # asm 2: movdqa y6=%xmm1 movdqa %xmm14,%xmm1 # qhasm: uint32323232 y6 += z5 # asm 1: paddd r6=int6464#3 # asm 2: movdqa r6=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y6 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y2=int6464#2 # asm 2: movdqa y2=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 y2 += z1 # asm 1: paddd r2=int6464#3 # asm 2: movdqa r2=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y2 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y7=int6464#2 # asm 2: movdqa y7=%xmm1 movdqa %xmm12,%xmm1 # qhasm: uint32323232 y7 += z6 # asm 1: paddd r7=int6464#3 # asm 2: movdqa r7=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y7 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y3=int6464#2 # asm 2: movdqa y3=%xmm1 movdqa %xmm7,%xmm1 # qhasm: uint32323232 y3 += z2 # asm 1: paddd r3=int6464#3 # asm 2: movdqa r3=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y3 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y4=int6464#2 # asm 2: movdqa y4=%xmm1 movdqa %xmm5,%xmm1 # qhasm: uint32323232 y4 += z7 # asm 1: paddd r4=int6464#3 # asm 2: movdqa r4=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y4 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y0=int6464#2 # asm 2: movdqa y0=%xmm1 movdqa %xmm10,%xmm1 # qhasm: uint32323232 y0 += z3 # asm 1: paddd r0=int6464#3 # asm 2: movdqa r0=%xmm2 movdqa %xmm1,%xmm2 # qhasm: uint32323232 y0 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 320(%rsp),%xmm1 # qhasm: z0_stack = z0 # asm 1: movdqa z0_stack=stack128#21 # asm 2: movdqa z0_stack=320(%rsp) movdqa %xmm0,320(%rsp) # qhasm: y5 = z7 # asm 1: movdqa y5=int6464#1 # asm 2: movdqa y5=%xmm0 movdqa %xmm8,%xmm0 # qhasm: uint32323232 y5 += z4 # asm 1: paddd r5=int6464#3 # asm 2: movdqa r5=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 y5 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,y11=int6464#1 # asm 2: movdqa y11=%xmm0 movdqa %xmm11,%xmm0 # qhasm: uint32323232 y11 += z10 # asm 1: paddd r11=int6464#3 # asm 2: movdqa r11=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 y11 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 336(%rsp),%xmm2 # qhasm: z5_stack = z5 # asm 1: movdqa z5_stack=stack128#22 # asm 2: movdqa z5_stack=336(%rsp) movdqa %xmm12,336(%rsp) # qhasm: y12 = z14 # asm 1: movdqa y12=int6464#1 # asm 2: movdqa y12=%xmm0 movdqa %xmm3,%xmm0 # 
qhasm: uint32323232 y12 += z15 # asm 1: paddd r12=int6464#13 # asm 2: movdqa r12=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y12 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,y8=int6464#1 # asm 2: movdqa y8=%xmm0 movdqa %xmm1,%xmm0 # qhasm: uint32323232 y8 += z11 # asm 1: paddd r8=int6464#13 # asm 2: movdqa r8=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y8 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y13=int6464#1 # asm 2: movdqa y13=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 y13 += z12 # asm 1: paddd r13=int6464#13 # asm 2: movdqa r13=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y13 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,y9=int6464#1 # asm 2: movdqa y9=%xmm0 movdqa %xmm6,%xmm0 # qhasm: uint32323232 y9 += z8 # asm 1: paddd r9=int6464#13 # asm 2: movdqa r9=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y9 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y14=int6464#1 # asm 2: movdqa y14=%xmm0 movdqa %xmm13,%xmm0 # qhasm: uint32323232 y14 += z13 # asm 1: paddd r14=int6464#13 # asm 2: movdqa r14=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y14 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,y10=int6464#1 # asm 2: movdqa y10=%xmm0 movdqa %xmm15,%xmm0 # qhasm: uint32323232 y10 += z9 # asm 1: paddd r10=int6464#13 # asm 2: movdqa r10=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y10 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,y15=int6464#1 # asm 2: movdqa y15=%xmm0 movdqa %xmm9,%xmm0 # qhasm: uint32323232 y15 += z14 # asm 1: paddd r15=int6464#13 # asm 2: movdqa r15=%xmm12 movdqa %xmm0,%xmm12 # qhasm: uint32323232 y15 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,z0=int6464#13 # asm 2: movdqa z0=%xmm12 movdqa 320(%rsp),%xmm12 # qhasm: z5 = z5_stack # asm 1: movdqa z5=int6464#1 # asm 2: movdqa z5=%xmm0 movdqa 336(%rsp),%xmm0 # qhasm: unsigned>? 
i -= 2 # asm 1: sub $2, ja ._mainloop1 # qhasm: uint32323232 z0 += orig0 # asm 1: paddd in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int64#3 # asm 2: movd in0=%rdx movd %xmm12,%rdx # qhasm: in1 = z1 # asm 1: movd in1=int64#4 # asm 2: movd in1=%rcx movd %xmm7,%rcx # qhasm: in2 = z2 # asm 1: movd in2=int64#5 # asm 2: movd in2=%r8 movd %xmm10,%r8 # qhasm: in3 = z3 # asm 1: movd in3=int64#6 # asm 2: movd in3=%r9 movd %xmm4,%r9 # qhasm: (uint32) in0 ^= *(uint32 *) (m + 192) # asm 1: xorl 192(in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int64#3 # asm 2: movd in4=%rdx movd %xmm14,%rdx # qhasm: in5 = z5 # asm 1: movd in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in6 = z6 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm5,%r8 # qhasm: in7 = z7 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm8,%r9 # qhasm: (uint32) in4 ^= *(uint32 *) (m + 208) # asm 1: xorl 208(in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: z8 <<<= 96 # asm 1: pshufd 
$0x39,in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int64#3 # asm 2: movd in8=%rdx movd %xmm15,%rdx # qhasm: in9 = z9 # asm 1: movd in9=int64#4 # asm 2: movd in9=%rcx movd %xmm11,%rcx # qhasm: in10 = z10 # asm 1: movd in10=int64#5 # asm 2: movd in10=%r8 movd %xmm1,%r8 # qhasm: in11 = z11 # asm 1: movd in11=int64#6 # asm 2: movd in11=%r9 movd %xmm6,%r9 # qhasm: (uint32) in8 ^= *(uint32 *) (m + 224) # asm 1: xorl 224(in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int64#3 # asm 2: movd in12=%rdx movd %xmm13,%rdx # qhasm: in13 = z13 # asm 1: movd in13=int64#4 # asm 2: movd in13=%rcx movd %xmm9,%rcx # qhasm: in14 = z14 # asm 1: movd in14=int64#5 # asm 2: movd in14=%r8 movd %xmm3,%r8 # qhasm: in15 = z15 # asm 1: movd in15=int64#6 # asm 2: movd in15=%r9 movd %xmm2,%r9 # qhasm: (uint32) in12 ^= *(uint32 *) (m + 240) # asm 1: xorl 240(bytes=int64#6 # asm 2: movq bytes=%r9 movq 408(%rsp),%r9 # qhasm: bytes -= 256 # asm 1: sub $256,? 
bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: bytesbetween1and255: ._bytesbetween1and255: # qhasm: unsignedctarget=int64#3 # asm 2: mov ctarget=%rdx mov %rdi,%rdx # qhasm: out = &tmp # asm 1: leaq out=int64#1 # asm 2: leaq out=%rdi leaq 416(%rsp),%rdi # qhasm: i = bytes # asm 1: mov i=int64#4 # asm 2: mov i=%rcx mov %r9,%rcx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # qhasm: out = &tmp # asm 1: leaq out=int64#1 # asm 2: leaq out=%rdi leaq 416(%rsp),%rdi # qhasm: m = &tmp # asm 1: leaq m=int64#2 # asm 2: leaq m=%rsi leaq 416(%rsp),%rsi # comment:fp stack unchanged by fallthrough # qhasm: nocopy: ._nocopy: # qhasm: bytes_backup = bytes # asm 1: movq bytes_backup=stack64#8 # asm 2: movq bytes_backup=408(%rsp) movq %r9,408(%rsp) # qhasm: diag0 = x0 # asm 1: movdqa diag0=int6464#1 # asm 2: movdqa diag0=%xmm0 movdqa 48(%rsp),%xmm0 # qhasm: diag1 = x1 # asm 1: movdqa diag1=int6464#2 # asm 2: movdqa diag1=%xmm1 movdqa 0(%rsp),%xmm1 # qhasm: diag2 = x2 # asm 1: movdqa diag2=int6464#3 # asm 2: movdqa diag2=%xmm2 movdqa 16(%rsp),%xmm2 # qhasm: diag3 = x3 # asm 1: movdqa diag3=int6464#4 # asm 2: movdqa diag3=%xmm3 movdqa 32(%rsp),%xmm3 # qhasm: a0 = diag1 # asm 1: movdqa a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: i = 8 # asm 1: mov $8,>i=int64#4 # asm 2: mov $8,>i=%rcx mov $8,%rcx # qhasm: mainloop2: ._mainloop2: # qhasm: uint32323232 a0 += diag0 # asm 1: paddd a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 # asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 # 
asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,? i -= 4 # asm 1: sub $4,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,b0=int6464#8,>b0=int6464#8 # asm 2: pxor >b0=%xmm7,>b0=%xmm7 pxor %xmm7,%xmm7 # qhasm: uint32323232 b7 >>= 14 # asm 1: psrld $14, ja ._mainloop2 # qhasm: uint32323232 diag0 += x0 # asm 1: paddd in0=int64#4 # asm 2: movd in0=%rcx movd %xmm0,%rcx # qhasm: in12 = diag1 # asm 1: movd in12=int64#5 # asm 2: movd in12=%r8 movd %xmm1,%r8 # qhasm: in8 = diag2 # asm 1: movd in8=int64#6 # asm 2: movd in8=%r9 movd %xmm2,%r9 # qhasm: in4 = diag3 # asm 1: movd in4=int64#7 # asm 2: movd in4=%rax movd %xmm3,%rax # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in5=int64#4 # asm 2: movd in5=%rcx movd %xmm0,%rcx # qhasm: in1 = diag1 # asm 1: movd in1=int64#5 # asm 2: movd in1=%r8 movd %xmm1,%r8 # qhasm: in13 = diag2 # asm 1: movd in13=int64#6 # asm 2: movd in13=%r9 movd %xmm2,%r9 # qhasm: in9 = diag3 # asm 1: movd in9=int64#7 # asm 2: movd in9=%rax movd %xmm3,%rax # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in10=int64#4 # asm 2: movd in10=%rcx movd %xmm0,%rcx # qhasm: in6 = diag1 # asm 1: movd in6=int64#5 # asm 2: movd in6=%r8 movd %xmm1,%r8 # qhasm: in2 = diag2 # asm 1: movd in2=int64#6 # asm 2: movd in2=%r9 movd %xmm2,%r9 # qhasm: in14 = diag3 # asm 1: movd in14=int64#7 # asm 2: movd in14=%rax movd %xmm3,%rax # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in15=int64#4 # asm 2: movd in15=%rcx movd %xmm0,%rcx # qhasm: in11 = diag1 # asm 1: movd in11=int64#5 # asm 2: movd in11=%r8 movd %xmm1,%r8 # qhasm: in7 = diag2 # asm 1: movd in7=int64#6 # asm 2: movd in7=%r9 movd %xmm2,%r9 # qhasm: in3 = diag3 # asm 1: movd in3=int64#7 # asm 2: movd in3=%rax movd %xmm3,%rax # qhasm: (uint32) in15 ^= *(uint32 *) (m + 60) # asm 1: xorl 60(bytes=int64#6 # asm 2: movq bytes=%r9 movq 408(%rsp),%r9 # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int64#4d # asm 2: movl in8=%ecx movl 16(%rsp),%ecx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int64#5d # asm 2: movl 4+in9=%r8d movl 4+32(%rsp),%r8d # qhasm: in8 += 1 # asm 1: add $1,in9=int64#5 # asm 2: mov in9=%r8 mov %rcx,%r8 # qhasm: (uint64) in9 >>= 32 # asm 1: shr $32,x2=stack128#2 # asm 2: movl x2=16(%rsp) movl %ecx,16(%rsp) # qhasm: ((uint32 
*)&x3)[1] = in9 # asm 1: movl ? unsigned ja ._bytesatleast65 # comment:fp stack unchanged by jump # qhasm: goto bytesatleast64 if !unsigned< jae ._bytesatleast64 # qhasm: m = out # asm 1: mov m=int64#2 # asm 2: mov m=%rsi mov %rdi,%rsi # qhasm: out = ctarget # asm 1: mov out=int64#1 # asm 2: mov out=%rdi mov %rdx,%rdi # qhasm: i = bytes # asm 1: mov i=int64#4 # asm 2: mov i=%rcx mov %r9,%rcx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # comment:fp stack unchanged by fallthrough # qhasm: bytesatleast64: ._bytesatleast64: # comment:fp stack unchanged by fallthrough # qhasm: done: ._done: # qhasm: r11_caller = r11_stack # asm 1: movq r11_caller=int64#9 # asm 2: movq r11_caller=%r11 movq 352(%rsp),%r11 # qhasm: r12_caller = r12_stack # asm 1: movq r12_caller=int64#10 # asm 2: movq r12_caller=%r12 movq 360(%rsp),%r12 # qhasm: r13_caller = r13_stack # asm 1: movq r13_caller=int64#11 # asm 2: movq r13_caller=%r13 movq 368(%rsp),%r13 # qhasm: r14_caller = r14_stack # asm 1: movq r14_caller=int64#12 # asm 2: movq r14_caller=%r14 movq 376(%rsp),%r14 # qhasm: r15_caller = r15_stack # asm 1: movq r15_caller=int64#13 # asm 2: movq r15_caller=%r15 movq 384(%rsp),%r15 # qhasm: rbx_caller = rbx_stack # asm 1: movq rbx_caller=int64#14 # asm 2: movq rbx_caller=%rbx movq 392(%rsp),%rbx # qhasm: rbp_caller = rbp_stack # asm 1: movq rbp_caller=int64#15 # asm 2: movq rbp_caller=%rbp movq 400(%rsp),%rbp # qhasm: leave add %r11,%rsp xor %rax,%rax xor %rdx,%rdx ret # qhasm: bytesatleast65: ._bytesatleast65: # qhasm: bytes -= 64 # asm 1: sub $64,= 64) { crypto_core_salsa208(c,in,k,sigma); u = 1; for (i = 8;i < 16;++i) { u += (unsigned int) in[i]; in[i] = u; u >>= 8; } clen -= 64; c += 64; } if (clen) { crypto_core_salsa208(block,in,k,sigma); for (i = 0;i < clen;++i) c[i] = block[i]; } return 0; } curvedns-curvedns-0.87/nacl/crypto_stream/salsa208/ref/xor.c000066400000000000000000000016621150631715100240320ustar00rootroot00000000000000/* version 20080913 D. J. Bernstein Public domain. 
*/ #include "crypto_core_salsa208.h" #include "crypto_stream.h" typedef unsigned int uint32; static const unsigned char sigma[16] = "expand 32-byte k"; int crypto_stream_xor( unsigned char *c, const unsigned char *m,unsigned long long mlen, const unsigned char *n, const unsigned char *k ) { unsigned char in[16]; unsigned char block[64]; int i; unsigned int u; if (!mlen) return 0; for (i = 0;i < 8;++i) in[i] = n[i]; for (i = 8;i < 16;++i) in[i] = 0; while (mlen >= 64) { crypto_core_salsa208(block,in,k,sigma); for (i = 0;i < 64;++i) c[i] = m[i] ^ block[i]; u = 1; for (i = 8;i < 16;++i) { u += (unsigned int) in[i]; in[i] = u; u >>= 8; } mlen -= 64; c += 64; m += 64; } if (mlen) { crypto_core_salsa208(block,in,k,sigma); for (i = 0;i < mlen;++i) c[i] = m[i] ^ block[i]; } return 0; } curvedns-curvedns-0.87/nacl/crypto_stream/salsa208/used000066400000000000000000000000001150631715100231460ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/salsa208/x86_xmm5/000077500000000000000000000000001150631715100236705ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/salsa208/x86_xmm5/api.h000066400000000000000000000000671150631715100246150ustar00rootroot00000000000000#define CRYPTO_KEYBYTES 32 #define CRYPTO_NONCEBYTES 8 curvedns-curvedns-0.87/nacl/crypto_stream/salsa208/x86_xmm5/stream.s000066400000000000000000004265621150631715100253660ustar00rootroot00000000000000 # qhasm: int32 a # qhasm: stack32 arg1 # qhasm: stack32 arg2 # qhasm: stack32 arg3 # qhasm: stack32 arg4 # qhasm: stack32 arg5 # qhasm: stack32 arg6 # qhasm: input arg1 # qhasm: input arg2 # qhasm: input arg3 # qhasm: input arg4 # qhasm: input arg5 # qhasm: input arg6 # qhasm: int32 eax # qhasm: int32 ebx # qhasm: int32 esi # qhasm: int32 edi # qhasm: int32 ebp # qhasm: caller eax # qhasm: caller ebx # qhasm: caller esi # qhasm: caller edi # qhasm: caller ebp # qhasm: int32 k # qhasm: int32 kbits # qhasm: int32 iv # qhasm: int32 i # qhasm: stack128 x0 # qhasm: stack128 x1 # qhasm: stack128 x2 # qhasm: stack128 x3 # qhasm: int32 m # qhasm: stack32 out_stack # qhasm: int32 out # qhasm: stack32 bytes_stack # qhasm: int32 bytes # qhasm: stack32 eax_stack # qhasm: stack32 ebx_stack # qhasm: stack32 esi_stack # qhasm: stack32 edi_stack # qhasm: stack32 ebp_stack # qhasm: int6464 diag0 # qhasm: int6464 diag1 # qhasm: int6464 diag2 # qhasm: int6464 diag3 # qhasm: int6464 a0 # qhasm: int6464 a1 # qhasm: int6464 a2 # qhasm: int6464 a3 # qhasm: int6464 a4 # qhasm: int6464 a5 # qhasm: int6464 a6 # qhasm: int6464 a7 # qhasm: int6464 b0 # qhasm: int6464 b1 # qhasm: int6464 b2 # qhasm: int6464 b3 # qhasm: int6464 b4 # qhasm: int6464 b5 # qhasm: int6464 b6 # qhasm: int6464 b7 # qhasm: int6464 z0 # qhasm: int6464 z1 # qhasm: int6464 z2 # qhasm: int6464 z3 # qhasm: int6464 z4 # qhasm: int6464 z5 # qhasm: int6464 z6 # qhasm: int6464 z7 # qhasm: int6464 z8 # qhasm: int6464 z9 # qhasm: int6464 z10 # qhasm: int6464 z11 # qhasm: int6464 z12 # qhasm: int6464 z13 # qhasm: int6464 z14 # qhasm: int6464 z15 # qhasm: stack128 z0_stack # qhasm: stack128 z1_stack # qhasm: stack128 z2_stack # qhasm: stack128 z3_stack # qhasm: stack128 z4_stack # qhasm: stack128 z5_stack # qhasm: stack128 z6_stack # qhasm: stack128 z7_stack # qhasm: stack128 z8_stack # qhasm: stack128 z9_stack # qhasm: stack128 z10_stack # qhasm: stack128 z11_stack # qhasm: stack128 z12_stack # qhasm: stack128 z13_stack # qhasm: stack128 z14_stack # qhasm: stack128 z15_stack # qhasm: stack128 orig0 # qhasm: stack128 orig1 # qhasm: stack128 orig2 # qhasm: stack128 orig3 
# qhasm: stack128 orig4 # qhasm: stack128 orig5 # qhasm: stack128 orig6 # qhasm: stack128 orig7 # qhasm: stack128 orig8 # qhasm: stack128 orig9 # qhasm: stack128 orig10 # qhasm: stack128 orig11 # qhasm: stack128 orig12 # qhasm: stack128 orig13 # qhasm: stack128 orig14 # qhasm: stack128 orig15 # qhasm: int6464 p # qhasm: int6464 q # qhasm: int6464 r # qhasm: int6464 s # qhasm: int6464 t # qhasm: int6464 u # qhasm: int6464 v # qhasm: int6464 w # qhasm: int6464 mp # qhasm: int6464 mq # qhasm: int6464 mr # qhasm: int6464 ms # qhasm: int6464 mt # qhasm: int6464 mu # qhasm: int6464 mv # qhasm: int6464 mw # qhasm: int32 in0 # qhasm: int32 in1 # qhasm: int32 in2 # qhasm: int32 in3 # qhasm: int32 in4 # qhasm: int32 in5 # qhasm: int32 in6 # qhasm: int32 in7 # qhasm: int32 in8 # qhasm: int32 in9 # qhasm: int32 in10 # qhasm: int32 in11 # qhasm: int32 in12 # qhasm: int32 in13 # qhasm: int32 in14 # qhasm: int32 in15 # qhasm: stack512 tmp # qhasm: stack32 ctarget # qhasm: enter crypto_stream_salsa208_x86_xmm5 .text .p2align 5 .globl _crypto_stream_salsa208_x86_xmm5 .globl crypto_stream_salsa208_x86_xmm5 _crypto_stream_salsa208_x86_xmm5: crypto_stream_salsa208_x86_xmm5: mov %esp,%eax and $31,%eax add $704,%eax sub %eax,%esp # qhasm: eax_stack = eax # asm 1: movl eax_stack=stack32#1 # asm 2: movl eax_stack=0(%esp) movl %eax,0(%esp) # qhasm: ebx_stack = ebx # asm 1: movl ebx_stack=stack32#2 # asm 2: movl ebx_stack=4(%esp) movl %ebx,4(%esp) # qhasm: esi_stack = esi # asm 1: movl esi_stack=stack32#3 # asm 2: movl esi_stack=8(%esp) movl %esi,8(%esp) # qhasm: edi_stack = edi # asm 1: movl edi_stack=stack32#4 # asm 2: movl edi_stack=12(%esp) movl %edi,12(%esp) # qhasm: ebp_stack = ebp # asm 1: movl ebp_stack=stack32#5 # asm 2: movl ebp_stack=16(%esp) movl %ebp,16(%esp) # qhasm: bytes = arg2 # asm 1: movl bytes=int32#3 # asm 2: movl bytes=%edx movl 8(%esp,%eax),%edx # qhasm: out = arg1 # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 4(%esp,%eax),%edi # qhasm: m = out # asm 1: mov m=int32#5 # asm 2: mov m=%esi mov %edi,%esi # qhasm: iv = arg4 # asm 1: movl iv=int32#4 # asm 2: movl iv=%ebx movl 16(%esp,%eax),%ebx # qhasm: k = arg5 # asm 1: movl k=int32#7 # asm 2: movl k=%ebp movl 20(%esp,%eax),%ebp # qhasm: unsigned>? bytes - 0 # asm 1: cmp $0, jbe ._done # qhasm: a = 0 # asm 1: mov $0,>a=int32#1 # asm 2: mov $0,>a=%eax mov $0,%eax # qhasm: i = bytes # asm 1: mov i=int32#2 # asm 2: mov i=%ecx mov %edx,%ecx # qhasm: while (i) { *out++ = a; --i } rep stosb # qhasm: out -= bytes # asm 1: subl eax_stack=stack32#1 # asm 2: movl eax_stack=0(%esp) movl %eax,0(%esp) # qhasm: ebx_stack = ebx # asm 1: movl ebx_stack=stack32#2 # asm 2: movl ebx_stack=4(%esp) movl %ebx,4(%esp) # qhasm: esi_stack = esi # asm 1: movl esi_stack=stack32#3 # asm 2: movl esi_stack=8(%esp) movl %esi,8(%esp) # qhasm: edi_stack = edi # asm 1: movl edi_stack=stack32#4 # asm 2: movl edi_stack=12(%esp) movl %edi,12(%esp) # qhasm: ebp_stack = ebp # asm 1: movl ebp_stack=stack32#5 # asm 2: movl ebp_stack=16(%esp) movl %ebp,16(%esp) # qhasm: out = arg1 # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 4(%esp,%eax),%edi # qhasm: m = arg2 # asm 1: movl m=int32#5 # asm 2: movl m=%esi movl 8(%esp,%eax),%esi # qhasm: bytes = arg3 # asm 1: movl bytes=int32#3 # asm 2: movl bytes=%edx movl 12(%esp,%eax),%edx # qhasm: iv = arg5 # asm 1: movl iv=int32#4 # asm 2: movl iv=%ebx movl 20(%esp,%eax),%ebx # qhasm: k = arg6 # asm 1: movl k=int32#7 # asm 2: movl k=%ebp movl 24(%esp,%eax),%ebp # qhasm: unsigned>? 
bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: start: ._start: # qhasm: out_stack = out # asm 1: movl out_stack=stack32#6 # asm 2: movl out_stack=20(%esp) movl %edi,20(%esp) # qhasm: bytes_stack = bytes # asm 1: movl bytes_stack=stack32#7 # asm 2: movl bytes_stack=24(%esp) movl %edx,24(%esp) # qhasm: in4 = *(uint32 *) (k + 12) # asm 1: movl 12(in4=int32#1 # asm 2: movl 12(in4=%eax movl 12(%ebp),%eax # qhasm: in12 = *(uint32 *) (k + 20) # asm 1: movl 20(in12=int32#2 # asm 2: movl 20(in12=%ecx movl 20(%ebp),%ecx # qhasm: ((uint32 *)&x3)[0] = in4 # asm 1: movl x3=stack128#1 # asm 2: movl x3=32(%esp) movl %eax,32(%esp) # qhasm: ((uint32 *)&x1)[0] = in12 # asm 1: movl x1=stack128#2 # asm 2: movl x1=48(%esp) movl %ecx,48(%esp) # qhasm: in0 = 1634760805 # asm 1: mov $1634760805,>in0=int32#1 # asm 2: mov $1634760805,>in0=%eax mov $1634760805,%eax # qhasm: in8 = 0 # asm 1: mov $0,>in8=int32#2 # asm 2: mov $0,>in8=%ecx mov $0,%ecx # qhasm: ((uint32 *)&x0)[0] = in0 # asm 1: movl x0=stack128#3 # asm 2: movl x0=64(%esp) movl %eax,64(%esp) # qhasm: ((uint32 *)&x2)[0] = in8 # asm 1: movl x2=stack128#4 # asm 2: movl x2=80(%esp) movl %ecx,80(%esp) # qhasm: in6 = *(uint32 *) (iv + 0) # asm 1: movl 0(in6=int32#1 # asm 2: movl 0(in6=%eax movl 0(%ebx),%eax # qhasm: in7 = *(uint32 *) (iv + 4) # asm 1: movl 4(in7=int32#2 # asm 2: movl 4(in7=%ecx movl 4(%ebx),%ecx # qhasm: ((uint32 *)&x1)[2] = in6 # asm 1: movl in9=int32#1 # asm 2: mov $0,>in9=%eax mov $0,%eax # qhasm: in10 = 2036477234 # asm 1: mov $2036477234,>in10=int32#2 # asm 2: mov $2036477234,>in10=%ecx mov $2036477234,%ecx # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl in1=int32#1 # asm 2: movl 0(in1=%eax movl 0(%ebp),%eax # qhasm: in2 = *(uint32 *) (k + 4) # asm 1: movl 4(in2=int32#2 # asm 2: movl 4(in2=%ecx movl 4(%ebp),%ecx # qhasm: in3 = *(uint32 *) (k + 8) # asm 1: movl 8(in3=int32#3 # asm 2: movl 8(in3=%edx movl 8(%ebp),%edx # qhasm: in5 = 857760878 # asm 1: mov $857760878,>in5=int32#4 # asm 2: mov $857760878,>in5=%ebx mov $857760878,%ebx # qhasm: ((uint32 *)&x1)[1] = in1 # asm 1: movl in11=int32#1 # asm 2: movl 16(in11=%eax movl 16(%ebp),%eax # qhasm: in13 = *(uint32 *) (k + 24) # asm 1: movl 24(in13=int32#2 # asm 2: movl 24(in13=%ecx movl 24(%ebp),%ecx # qhasm: in14 = *(uint32 *) (k + 28) # asm 1: movl 28(in14=int32#3 # asm 2: movl 28(in14=%edx movl 28(%ebp),%edx # qhasm: in15 = 1797285236 # asm 1: mov $1797285236,>in15=int32#4 # asm 2: mov $1797285236,>in15=%ebx mov $1797285236,%ebx # qhasm: ((uint32 *)&x1)[3] = in11 # asm 1: movl bytes=int32#1 # asm 2: movl bytes=%eax movl 24(%esp),%eax # qhasm: unsignedz0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 64(%esp),%xmm0 # qhasm: z5 = z0[1,1,1,1] # asm 1: pshufd $0x55,z5=int6464#2 # asm 2: pshufd $0x55,z5=%xmm1 pshufd $0x55,%xmm0,%xmm1 # qhasm: z10 = z0[2,2,2,2] # asm 1: pshufd $0xaa,z10=int6464#3 # asm 2: pshufd $0xaa,z10=%xmm2 pshufd $0xaa,%xmm0,%xmm2 # qhasm: z15 = z0[3,3,3,3] # asm 1: pshufd $0xff,z15=int6464#4 # asm 2: pshufd $0xff,z15=%xmm3 pshufd $0xff,%xmm0,%xmm3 # qhasm: z0 = z0[0,0,0,0] # asm 1: pshufd $0x00,z0=int6464#1 # asm 2: pshufd $0x00,z0=%xmm0 pshufd $0x00,%xmm0,%xmm0 # qhasm: orig5 = z5 # asm 1: movdqa orig5=stack128#5 # asm 2: movdqa orig5=96(%esp) movdqa %xmm1,96(%esp) # qhasm: orig10 = z10 # asm 1: movdqa orig10=stack128#6 # asm 2: movdqa orig10=112(%esp) movdqa %xmm2,112(%esp) # qhasm: orig15 = z15 # asm 1: movdqa orig15=stack128#7 # asm 2: movdqa orig15=128(%esp) movdqa %xmm3,128(%esp) # qhasm: orig0 = z0 # asm 1: movdqa 
orig0=stack128#8 # asm 2: movdqa orig0=144(%esp) movdqa %xmm0,144(%esp) # qhasm: z1 = x1 # asm 1: movdqa z1=int6464#1 # asm 2: movdqa z1=%xmm0 movdqa 48(%esp),%xmm0 # qhasm: z6 = z1[2,2,2,2] # asm 1: pshufd $0xaa,z6=int6464#2 # asm 2: pshufd $0xaa,z6=%xmm1 pshufd $0xaa,%xmm0,%xmm1 # qhasm: z11 = z1[3,3,3,3] # asm 1: pshufd $0xff,z11=int6464#3 # asm 2: pshufd $0xff,z11=%xmm2 pshufd $0xff,%xmm0,%xmm2 # qhasm: z12 = z1[0,0,0,0] # asm 1: pshufd $0x00,z12=int6464#4 # asm 2: pshufd $0x00,z12=%xmm3 pshufd $0x00,%xmm0,%xmm3 # qhasm: z1 = z1[1,1,1,1] # asm 1: pshufd $0x55,z1=int6464#1 # asm 2: pshufd $0x55,z1=%xmm0 pshufd $0x55,%xmm0,%xmm0 # qhasm: orig6 = z6 # asm 1: movdqa orig6=stack128#9 # asm 2: movdqa orig6=160(%esp) movdqa %xmm1,160(%esp) # qhasm: orig11 = z11 # asm 1: movdqa orig11=stack128#10 # asm 2: movdqa orig11=176(%esp) movdqa %xmm2,176(%esp) # qhasm: orig12 = z12 # asm 1: movdqa orig12=stack128#11 # asm 2: movdqa orig12=192(%esp) movdqa %xmm3,192(%esp) # qhasm: orig1 = z1 # asm 1: movdqa orig1=stack128#12 # asm 2: movdqa orig1=208(%esp) movdqa %xmm0,208(%esp) # qhasm: z2 = x2 # asm 1: movdqa z2=int6464#1 # asm 2: movdqa z2=%xmm0 movdqa 80(%esp),%xmm0 # qhasm: z7 = z2[3,3,3,3] # asm 1: pshufd $0xff,z7=int6464#2 # asm 2: pshufd $0xff,z7=%xmm1 pshufd $0xff,%xmm0,%xmm1 # qhasm: z13 = z2[1,1,1,1] # asm 1: pshufd $0x55,z13=int6464#3 # asm 2: pshufd $0x55,z13=%xmm2 pshufd $0x55,%xmm0,%xmm2 # qhasm: z2 = z2[2,2,2,2] # asm 1: pshufd $0xaa,z2=int6464#1 # asm 2: pshufd $0xaa,z2=%xmm0 pshufd $0xaa,%xmm0,%xmm0 # qhasm: orig7 = z7 # asm 1: movdqa orig7=stack128#13 # asm 2: movdqa orig7=224(%esp) movdqa %xmm1,224(%esp) # qhasm: orig13 = z13 # asm 1: movdqa orig13=stack128#14 # asm 2: movdqa orig13=240(%esp) movdqa %xmm2,240(%esp) # qhasm: orig2 = z2 # asm 1: movdqa orig2=stack128#15 # asm 2: movdqa orig2=256(%esp) movdqa %xmm0,256(%esp) # qhasm: z3 = x3 # asm 1: movdqa z3=int6464#1 # asm 2: movdqa z3=%xmm0 movdqa 32(%esp),%xmm0 # qhasm: z4 = z3[0,0,0,0] # asm 1: pshufd $0x00,z4=int6464#2 # asm 2: pshufd $0x00,z4=%xmm1 pshufd $0x00,%xmm0,%xmm1 # qhasm: z14 = z3[2,2,2,2] # asm 1: pshufd $0xaa,z14=int6464#3 # asm 2: pshufd $0xaa,z14=%xmm2 pshufd $0xaa,%xmm0,%xmm2 # qhasm: z3 = z3[3,3,3,3] # asm 1: pshufd $0xff,z3=int6464#1 # asm 2: pshufd $0xff,z3=%xmm0 pshufd $0xff,%xmm0,%xmm0 # qhasm: orig4 = z4 # asm 1: movdqa orig4=stack128#16 # asm 2: movdqa orig4=272(%esp) movdqa %xmm1,272(%esp) # qhasm: orig14 = z14 # asm 1: movdqa orig14=stack128#17 # asm 2: movdqa orig14=288(%esp) movdqa %xmm2,288(%esp) # qhasm: orig3 = z3 # asm 1: movdqa orig3=stack128#18 # asm 2: movdqa orig3=304(%esp) movdqa %xmm0,304(%esp) # qhasm: bytesatleast256: ._bytesatleast256: # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int32#2 # asm 2: movl in8=%ecx movl 80(%esp),%ecx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int32#3 # asm 2: movl 4+in9=%edx movl 4+32(%esp),%edx # qhasm: ((uint32 *) &orig8)[0] = in8 # asm 1: movl orig8=stack128#19 # asm 2: movl orig8=320(%esp) movl %ecx,320(%esp) # qhasm: ((uint32 *) &orig9)[0] = in9 # asm 1: movl orig9=stack128#20 # asm 2: movl orig9=336(%esp) movl %edx,336(%esp) # qhasm: carry? 
in8 += 1 # asm 1: add $1,x2=stack128#4 # asm 2: movl x2=80(%esp) movl %ecx,80(%esp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl bytes_stack=stack32#7 # asm 2: movl bytes_stack=24(%esp) movl %eax,24(%esp) # qhasm: i = 8 # asm 1: mov $8,>i=int32#1 # asm 2: mov $8,>i=%eax mov $8,%eax # qhasm: z5 = orig5 # asm 1: movdqa z5=int6464#1 # asm 2: movdqa z5=%xmm0 movdqa 96(%esp),%xmm0 # qhasm: z10 = orig10 # asm 1: movdqa z10=int6464#2 # asm 2: movdqa z10=%xmm1 movdqa 112(%esp),%xmm1 # qhasm: z15 = orig15 # asm 1: movdqa z15=int6464#3 # asm 2: movdqa z15=%xmm2 movdqa 128(%esp),%xmm2 # qhasm: z14 = orig14 # asm 1: movdqa z14=int6464#4 # asm 2: movdqa z14=%xmm3 movdqa 288(%esp),%xmm3 # qhasm: z3 = orig3 # asm 1: movdqa z3=int6464#5 # asm 2: movdqa z3=%xmm4 movdqa 304(%esp),%xmm4 # qhasm: z6 = orig6 # asm 1: movdqa z6=int6464#6 # asm 2: movdqa z6=%xmm5 movdqa 160(%esp),%xmm5 # qhasm: z11 = orig11 # asm 1: movdqa z11=int6464#7 # asm 2: movdqa z11=%xmm6 movdqa 176(%esp),%xmm6 # qhasm: z1 = orig1 # asm 1: movdqa z1=int6464#8 # asm 2: movdqa z1=%xmm7 movdqa 208(%esp),%xmm7 # qhasm: z5_stack = z5 # asm 1: movdqa z5_stack=stack128#21 # asm 2: movdqa z5_stack=352(%esp) movdqa %xmm0,352(%esp) # qhasm: z10_stack = z10 # asm 1: movdqa z10_stack=stack128#22 # asm 2: movdqa z10_stack=368(%esp) movdqa %xmm1,368(%esp) # qhasm: z15_stack = z15 # asm 1: movdqa z15_stack=stack128#23 # asm 2: movdqa z15_stack=384(%esp) movdqa %xmm2,384(%esp) # qhasm: z14_stack = z14 # asm 1: movdqa z14_stack=stack128#24 # asm 2: movdqa z14_stack=400(%esp) movdqa %xmm3,400(%esp) # qhasm: z3_stack = z3 # asm 1: movdqa z3_stack=stack128#25 # asm 2: movdqa z3_stack=416(%esp) movdqa %xmm4,416(%esp) # qhasm: z6_stack = z6 # asm 1: movdqa z6_stack=stack128#26 # asm 2: movdqa z6_stack=432(%esp) movdqa %xmm5,432(%esp) # qhasm: z11_stack = z11 # asm 1: movdqa z11_stack=stack128#27 # asm 2: movdqa z11_stack=448(%esp) movdqa %xmm6,448(%esp) # qhasm: z1_stack = z1 # asm 1: movdqa z1_stack=stack128#28 # asm 2: movdqa z1_stack=464(%esp) movdqa %xmm7,464(%esp) # qhasm: z7 = orig7 # asm 1: movdqa z7=int6464#5 # asm 2: movdqa z7=%xmm4 movdqa 224(%esp),%xmm4 # qhasm: z13 = orig13 # asm 1: movdqa z13=int6464#6 # asm 2: movdqa z13=%xmm5 movdqa 240(%esp),%xmm5 # qhasm: z2 = orig2 # asm 1: movdqa z2=int6464#7 # asm 2: movdqa z2=%xmm6 movdqa 256(%esp),%xmm6 # qhasm: z9 = orig9 # asm 1: movdqa z9=int6464#8 # asm 2: movdqa z9=%xmm7 movdqa 336(%esp),%xmm7 # qhasm: p = orig0 # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 144(%esp),%xmm0 # qhasm: t = orig12 # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 192(%esp),%xmm2 # qhasm: q = orig4 # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 272(%esp),%xmm3 # qhasm: r = orig8 # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 320(%esp),%xmm1 # qhasm: z7_stack = z7 # asm 1: movdqa z7_stack=stack128#29 # asm 2: movdqa z7_stack=480(%esp) movdqa %xmm4,480(%esp) # qhasm: z13_stack = z13 # asm 1: movdqa z13_stack=stack128#30 # asm 2: movdqa z13_stack=496(%esp) movdqa %xmm5,496(%esp) # qhasm: z2_stack = z2 # asm 1: movdqa z2_stack=stack128#31 # asm 2: movdqa z2_stack=512(%esp) movdqa %xmm6,512(%esp) # qhasm: z9_stack = z9 # asm 1: movdqa z9_stack=stack128#32 # asm 2: movdqa z9_stack=528(%esp) movdqa %xmm7,528(%esp) # qhasm: z0_stack = p # asm 1: movdqa z0_stack=stack128#33 # asm 2: movdqa z0_stack=544(%esp) movdqa %xmm0,544(%esp) # qhasm: z12_stack = t # asm 1: movdqa z12_stack=stack128#34 # asm 2: movdqa z12_stack=560(%esp) movdqa %xmm2,560(%esp) # qhasm: z4_stack = q # asm 1: 
movdqa z4_stack=stack128#35 # asm 2: movdqa z4_stack=576(%esp) movdqa %xmm3,576(%esp) # qhasm: z8_stack = r # asm 1: movdqa z8_stack=stack128#36 # asm 2: movdqa z8_stack=592(%esp) movdqa %xmm1,592(%esp) # qhasm: mainloop1: ._mainloop1: # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s = t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z4_stack=stack128#33 # asm 2: movdqa z4_stack=544(%esp) movdqa %xmm3,544(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z8_stack=stack128#34 # asm 2: movdqa z8_stack=560(%esp) movdqa %xmm1,560(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 464(%esp),%xmm2 # qhasm: mp = z5_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 352(%esp),%xmm4 # qhasm: mq = z9_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 528(%esp),%xmm3 # qhasm: mr = z13_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 496(%esp),%xmm5 # qhasm: z12_stack = s # asm 1: movdqa z12_stack=stack128#30 # asm 2: movdqa z12_stack=496(%esp) movdqa %xmm6,496(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z0_stack=stack128#21 # asm 2: movdqa z0_stack=352(%esp) movdqa %xmm0,352(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z9_stack=stack128#32 # asm 2: movdqa z9_stack=528(%esp) movdqa %xmm3,528(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z13_stack=stack128#35 # asm 2: movdqa z13_stack=576(%esp) movdqa %xmm5,576(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 432(%esp),%xmm2 # qhasm: p = z10_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 368(%esp),%xmm0 # qhasm: q = z14_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 400(%esp),%xmm3 # qhasm: r = z2_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 512(%esp),%xmm1 # qhasm: z1_stack = ms # asm 1: movdqa z1_stack=stack128#22 # asm 2: movdqa z1_stack=368(%esp) movdqa %xmm6,368(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z5_stack=stack128#24 # asm 2: movdqa z5_stack=400(%esp) movdqa %xmm4,400(%esp) # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s 
= t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z14_stack=stack128#36 # asm 2: movdqa z14_stack=592(%esp) movdqa %xmm3,592(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z2_stack=stack128#26 # asm 2: movdqa z2_stack=432(%esp) movdqa %xmm1,432(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 448(%esp),%xmm2 # qhasm: mp = z15_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 384(%esp),%xmm4 # qhasm: mq = z3_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 416(%esp),%xmm3 # qhasm: mr = z7_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 480(%esp),%xmm5 # qhasm: z6_stack = s # asm 1: movdqa z6_stack=stack128#23 # asm 2: movdqa z6_stack=384(%esp) movdqa %xmm6,384(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z10_stack=stack128#27 # asm 2: movdqa z10_stack=448(%esp) movdqa %xmm0,448(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z3_stack=stack128#25 # asm 2: movdqa z3_stack=416(%esp) movdqa %xmm3,416(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z7_stack=stack128#29 # asm 2: movdqa z7_stack=480(%esp) movdqa %xmm5,480(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 416(%esp),%xmm2 # qhasm: p = z0_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 352(%esp),%xmm0 # qhasm: q = z1_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 368(%esp),%xmm3 # qhasm: r = z2_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 432(%esp),%xmm1 # qhasm: z11_stack = ms # asm 1: movdqa z11_stack=stack128#21 # asm 2: movdqa z11_stack=352(%esp) movdqa %xmm6,352(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z15_stack=stack128#22 # asm 2: movdqa z15_stack=368(%esp) movdqa %xmm4,368(%esp) # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s = t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z1_stack=stack128#28 # asm 2: movdqa z1_stack=464(%esp) movdqa %xmm3,464(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # 
asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z2_stack=stack128#31 # asm 2: movdqa z2_stack=512(%esp) movdqa %xmm1,512(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 544(%esp),%xmm2 # qhasm: mp = z5_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 400(%esp),%xmm4 # qhasm: mq = z6_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 384(%esp),%xmm3 # qhasm: mr = z7_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 480(%esp),%xmm5 # qhasm: z3_stack = s # asm 1: movdqa z3_stack=stack128#25 # asm 2: movdqa z3_stack=416(%esp) movdqa %xmm6,416(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z0_stack=stack128#33 # asm 2: movdqa z0_stack=544(%esp) movdqa %xmm0,544(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z6_stack=stack128#26 # asm 2: movdqa z6_stack=432(%esp) movdqa %xmm3,432(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z7_stack=stack128#29 # asm 2: movdqa z7_stack=480(%esp) movdqa %xmm5,480(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 528(%esp),%xmm2 # qhasm: p = z10_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 448(%esp),%xmm0 # qhasm: q = z11_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 352(%esp),%xmm3 # qhasm: r = z8_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 560(%esp),%xmm1 # qhasm: z4_stack = ms # asm 1: movdqa z4_stack=stack128#34 # asm 2: movdqa z4_stack=560(%esp) movdqa %xmm6,560(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z5_stack=stack128#21 # asm 2: movdqa z5_stack=352(%esp) movdqa %xmm4,352(%esp) # qhasm: assign xmm0 to p # qhasm: assign xmm1 to r # qhasm: assign xmm2 to t # qhasm: assign xmm3 to q # qhasm: s = t # asm 1: movdqa s=int6464#7 # asm 2: movdqa s=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 t += p # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 25 # asm 1: psrld $25,z11_stack=stack128#27 # asm 2: movdqa z11_stack=448(%esp) movdqa %xmm3,448(%esp) # qhasm: t = p # asm 1: movdqa t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa %xmm0,%xmm2 # qhasm: uint32323232 t += q # asm 1: paddd u=int6464#5 # asm 2: movdqa u=%xmm4 movdqa %xmm2,%xmm4 # qhasm: uint32323232 t >>= 23 # asm 1: psrld $23,z8_stack=stack128#37 # asm 2: movdqa z8_stack=608(%esp) movdqa %xmm1,608(%esp) # qhasm: uint32323232 q += r # asm 1: paddd u=int6464#3 # asm 2: movdqa u=%xmm2 
movdqa %xmm3,%xmm2 # qhasm: uint32323232 q >>= 19 # asm 1: psrld $19,mt=int6464#3 # asm 2: movdqa mt=%xmm2 movdqa 592(%esp),%xmm2 # qhasm: mp = z15_stack # asm 1: movdqa mp=int6464#5 # asm 2: movdqa mp=%xmm4 movdqa 368(%esp),%xmm4 # qhasm: mq = z12_stack # asm 1: movdqa mq=int6464#4 # asm 2: movdqa mq=%xmm3 movdqa 496(%esp),%xmm3 # qhasm: mr = z13_stack # asm 1: movdqa mr=int6464#6 # asm 2: movdqa mr=%xmm5 movdqa 576(%esp),%xmm5 # qhasm: z9_stack = s # asm 1: movdqa z9_stack=stack128#32 # asm 2: movdqa z9_stack=528(%esp) movdqa %xmm6,528(%esp) # qhasm: uint32323232 r += s # asm 1: paddd u=int6464#7 # asm 2: movdqa u=%xmm6 movdqa %xmm1,%xmm6 # qhasm: uint32323232 r >>= 14 # asm 1: psrld $14,z10_stack=stack128#22 # asm 2: movdqa z10_stack=368(%esp) movdqa %xmm0,368(%esp) # qhasm: assign xmm2 to mt # qhasm: assign xmm3 to mq # qhasm: assign xmm4 to mp # qhasm: assign xmm5 to mr # qhasm: ms = mt # asm 1: movdqa ms=int6464#7 # asm 2: movdqa ms=%xmm6 movdqa %xmm2,%xmm6 # qhasm: uint32323232 mt += mp # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm2,%xmm0 # qhasm: uint32323232 mt >>= 25 # asm 1: psrld $25,z12_stack=stack128#35 # asm 2: movdqa z12_stack=576(%esp) movdqa %xmm3,576(%esp) # qhasm: mt = mp # asm 1: movdqa mt=int6464#1 # asm 2: movdqa mt=%xmm0 movdqa %xmm4,%xmm0 # qhasm: uint32323232 mt += mq # asm 1: paddd mu=int6464#2 # asm 2: movdqa mu=%xmm1 movdqa %xmm0,%xmm1 # qhasm: uint32323232 mt >>= 23 # asm 1: psrld $23,z13_stack=stack128#30 # asm 2: movdqa z13_stack=496(%esp) movdqa %xmm5,496(%esp) # qhasm: uint32323232 mq += mr # asm 1: paddd mu=int6464#1 # asm 2: movdqa mu=%xmm0 movdqa %xmm3,%xmm0 # qhasm: uint32323232 mq >>= 19 # asm 1: psrld $19,t=int6464#3 # asm 2: movdqa t=%xmm2 movdqa 576(%esp),%xmm2 # qhasm: p = z0_stack # asm 1: movdqa p=int6464#1 # asm 2: movdqa p=%xmm0 movdqa 544(%esp),%xmm0 # qhasm: q = z4_stack # asm 1: movdqa q=int6464#4 # asm 2: movdqa q=%xmm3 movdqa 560(%esp),%xmm3 # qhasm: r = z8_stack # asm 1: movdqa r=int6464#2 # asm 2: movdqa r=%xmm1 movdqa 608(%esp),%xmm1 # qhasm: z14_stack = ms # asm 1: movdqa z14_stack=stack128#24 # asm 2: movdqa z14_stack=400(%esp) movdqa %xmm6,400(%esp) # qhasm: uint32323232 mr += ms # asm 1: paddd mu=int6464#7 # asm 2: movdqa mu=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 mr >>= 14 # asm 1: psrld $14,z15_stack=stack128#23 # asm 2: movdqa z15_stack=384(%esp) movdqa %xmm4,384(%esp) # qhasm: unsigned>? 
i -= 2 # asm 1: sub $2, ja ._mainloop1 # qhasm: out = out_stack # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 20(%esp),%edi # qhasm: z0 = z0_stack # asm 1: movdqa z0=int6464#1 # asm 2: movdqa z0=%xmm0 movdqa 544(%esp),%xmm0 # qhasm: z1 = z1_stack # asm 1: movdqa z1=int6464#2 # asm 2: movdqa z1=%xmm1 movdqa 464(%esp),%xmm1 # qhasm: z2 = z2_stack # asm 1: movdqa z2=int6464#3 # asm 2: movdqa z2=%xmm2 movdqa 512(%esp),%xmm2 # qhasm: z3 = z3_stack # asm 1: movdqa z3=int6464#4 # asm 2: movdqa z3=%xmm3 movdqa 416(%esp),%xmm3 # qhasm: uint32323232 z0 += orig0 # asm 1: paddd in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: z0 <<<= 96 # asm 1: pshufd $0x39,in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in1 = z1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in2 = z2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in3 = z3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: in0 ^= *(uint32 *) (m + 192) # asm 1: xorl 192(z4=int6464#1 # asm 2: movdqa z4=%xmm0 movdqa 560(%esp),%xmm0 # qhasm: z5 = z5_stack # asm 1: movdqa z5=int6464#2 # asm 2: movdqa z5=%xmm1 movdqa 352(%esp),%xmm1 # qhasm: z6 = z6_stack # asm 1: movdqa z6=int6464#3 # asm 2: movdqa z6=%xmm2 movdqa 432(%esp),%xmm2 # qhasm: z7 = z7_stack # asm 1: movdqa z7=int6464#4 # asm 2: movdqa z7=%xmm3 movdqa 480(%esp),%xmm3 # qhasm: uint32323232 z4 += orig4 # asm 1: paddd in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: z4 <<<= 96 # asm 1: pshufd $0x39,in4=int32#1 # asm 2: movd in4=%eax movd %xmm0,%eax # qhasm: in5 = z5 # asm 1: movd in5=int32#2 # asm 2: movd in5=%ecx movd %xmm1,%ecx # qhasm: in6 = z6 # 
asm 1: movd in6=int32#3 # asm 2: movd in6=%edx movd %xmm2,%edx # qhasm: in7 = z7 # asm 1: movd in7=int32#4 # asm 2: movd in7=%ebx movd %xmm3,%ebx # qhasm: in4 ^= *(uint32 *) (m + 208) # asm 1: xorl 208(z8=int6464#1 # asm 2: movdqa z8=%xmm0 movdqa 608(%esp),%xmm0 # qhasm: z9 = z9_stack # asm 1: movdqa z9=int6464#2 # asm 2: movdqa z9=%xmm1 movdqa 528(%esp),%xmm1 # qhasm: z10 = z10_stack # asm 1: movdqa z10=int6464#3 # asm 2: movdqa z10=%xmm2 movdqa 368(%esp),%xmm2 # qhasm: z11 = z11_stack # asm 1: movdqa z11=int6464#4 # asm 2: movdqa z11=%xmm3 movdqa 448(%esp),%xmm3 # qhasm: uint32323232 z8 += orig8 # asm 1: paddd in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: z8 <<<= 96 # asm 1: pshufd $0x39,in8=int32#1 # asm 2: movd in8=%eax movd %xmm0,%eax # qhasm: in9 = z9 # asm 1: movd in9=int32#2 # asm 2: movd in9=%ecx movd %xmm1,%ecx # qhasm: in10 = z10 # asm 1: movd in10=int32#3 # asm 2: movd in10=%edx movd %xmm2,%edx # qhasm: in11 = z11 # asm 1: movd in11=int32#4 # asm 2: movd in11=%ebx movd %xmm3,%ebx # qhasm: in8 ^= *(uint32 *) (m + 224) # asm 1: xorl 224(z12=int6464#1 # asm 2: movdqa z12=%xmm0 movdqa 576(%esp),%xmm0 # qhasm: z13 = z13_stack # asm 1: movdqa z13=int6464#2 # asm 2: movdqa z13=%xmm1 movdqa 496(%esp),%xmm1 # qhasm: z14 = z14_stack # asm 1: movdqa z14=int6464#3 # asm 2: movdqa z14=%xmm2 movdqa 400(%esp),%xmm2 # qhasm: z15 = z15_stack # asm 1: movdqa z15=int6464#4 # asm 2: movdqa z15=%xmm3 movdqa 384(%esp),%xmm3 # qhasm: uint32323232 z12 += orig12 # asm 1: paddd in12=int32#1 # asm 2: movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int32#1 # asm 2: movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int32#1 # asm 2: movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: z12 <<<= 96 # asm 1: pshufd $0x39,in12=int32#1 # asm 2: 
movd in12=%eax movd %xmm0,%eax # qhasm: in13 = z13 # asm 1: movd in13=int32#2 # asm 2: movd in13=%ecx movd %xmm1,%ecx # qhasm: in14 = z14 # asm 1: movd in14=int32#3 # asm 2: movd in14=%edx movd %xmm2,%edx # qhasm: in15 = z15 # asm 1: movd in15=int32#4 # asm 2: movd in15=%ebx movd %xmm3,%ebx # qhasm: in12 ^= *(uint32 *) (m + 240) # asm 1: xorl 240(bytes=int32#1 # asm 2: movl bytes=%eax movl 24(%esp),%eax # qhasm: bytes -= 256 # asm 1: sub $256,out_stack=stack32#6 # asm 2: movl out_stack=20(%esp) movl %edi,20(%esp) # qhasm: unsigned? bytes - 0 # asm 1: cmp $0, jbe ._done # comment:fp stack unchanged by fallthrough # qhasm: bytesbetween1and255: ._bytesbetween1and255: # qhasm: unsignedctarget=stack32#6 # asm 2: movl ctarget=20(%esp) movl %edi,20(%esp) # qhasm: out = &tmp # asm 1: leal out=int32#6 # asm 2: leal out=%edi leal 640(%esp),%edi # qhasm: i = bytes # asm 1: mov i=int32#2 # asm 2: mov i=%ecx mov %eax,%ecx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # qhasm: out = &tmp # asm 1: leal out=int32#6 # asm 2: leal out=%edi leal 640(%esp),%edi # qhasm: m = &tmp # asm 1: leal m=int32#5 # asm 2: leal m=%esi leal 640(%esp),%esi # comment:fp stack unchanged by fallthrough # qhasm: nocopy: ._nocopy: # qhasm: bytes_stack = bytes # asm 1: movl bytes_stack=stack32#7 # asm 2: movl bytes_stack=24(%esp) movl %eax,24(%esp) # qhasm: diag0 = x0 # asm 1: movdqa diag0=int6464#1 # asm 2: movdqa diag0=%xmm0 movdqa 64(%esp),%xmm0 # qhasm: diag1 = x1 # asm 1: movdqa diag1=int6464#2 # asm 2: movdqa diag1=%xmm1 movdqa 48(%esp),%xmm1 # qhasm: diag2 = x2 # asm 1: movdqa diag2=int6464#3 # asm 2: movdqa diag2=%xmm2 movdqa 80(%esp),%xmm2 # qhasm: diag3 = x3 # asm 1: movdqa diag3=int6464#4 # asm 2: movdqa diag3=%xmm3 movdqa 32(%esp),%xmm3 # qhasm: a0 = diag1 # asm 1: movdqa a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: i = 8 # asm 1: mov $8,>i=int32#1 # asm 2: mov $8,>i=%eax mov $8,%eax # qhasm: mainloop2: ._mainloop2: # qhasm: uint32323232 a0 += diag0 # asm 1: paddd a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 # asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld 
$19,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a1=int6464#6 # asm 2: movdqa a1=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b0 = a0 # asm 1: movdqa b0=int6464#7 # asm 2: movdqa b0=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a0 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a2=int6464#5 # asm 2: movdqa a2=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b1 = a1 # asm 1: movdqa b1=int6464#7 # asm 2: movdqa b1=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a1 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a3=int6464#6 # asm 2: movdqa a3=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b2 = a2 # asm 1: movdqa b2=int6464#7 # asm 2: movdqa b2=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a2 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,a4=int6464#5 # asm 2: movdqa a4=%xmm4 movdqa %xmm3,%xmm4 # qhasm: b3 = a3 # asm 1: movdqa b3=int6464#7 # asm 2: movdqa b3=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a3 <<= 18 # asm 1: pslld $18,>= 14 # asm 1: psrld $14,a5=int6464#6 # asm 2: movdqa a5=%xmm5 movdqa %xmm0,%xmm5 # qhasm: b4 = a4 # asm 1: movdqa b4=int6464#7 # asm 2: movdqa b4=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a4 <<= 7 # asm 1: pslld $7,>= 25 # asm 1: psrld $25,a6=int6464#5 # asm 2: movdqa a6=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b5 = a5 # asm 1: movdqa b5=int6464#7 # asm 2: movdqa b5=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a5 <<= 9 # asm 1: pslld $9,>= 23 # asm 1: psrld $23,a7=int6464#6 # asm 2: movdqa a7=%xmm5 movdqa %xmm2,%xmm5 # qhasm: b6 = a6 # asm 1: movdqa b6=int6464#7 # asm 2: movdqa b6=%xmm6 movdqa %xmm4,%xmm6 # qhasm: uint32323232 a6 <<= 13 # asm 1: pslld $13,>= 19 # asm 1: psrld $19,? 
i -= 4 # asm 1: sub $4,a0=int6464#5 # asm 2: movdqa a0=%xmm4 movdqa %xmm1,%xmm4 # qhasm: b7 = a7 # asm 1: movdqa b7=int6464#7 # asm 2: movdqa b7=%xmm6 movdqa %xmm5,%xmm6 # qhasm: uint32323232 a7 <<= 18 # asm 1: pslld $18,b0=int6464#8,>b0=int6464#8 # asm 2: pxor >b0=%xmm7,>b0=%xmm7 pxor %xmm7,%xmm7 # qhasm: uint32323232 b7 >>= 14 # asm 1: psrld $14, ja ._mainloop2 # qhasm: uint32323232 diag0 += x0 # asm 1: paddd in0=int32#1 # asm 2: movd in0=%eax movd %xmm0,%eax # qhasm: in12 = diag1 # asm 1: movd in12=int32#2 # asm 2: movd in12=%ecx movd %xmm1,%ecx # qhasm: in8 = diag2 # asm 1: movd in8=int32#3 # asm 2: movd in8=%edx movd %xmm2,%edx # qhasm: in4 = diag3 # asm 1: movd in4=int32#4 # asm 2: movd in4=%ebx movd %xmm3,%ebx # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in5=int32#1 # asm 2: movd in5=%eax movd %xmm0,%eax # qhasm: in1 = diag1 # asm 1: movd in1=int32#2 # asm 2: movd in1=%ecx movd %xmm1,%ecx # qhasm: in13 = diag2 # asm 1: movd in13=int32#3 # asm 2: movd in13=%edx movd %xmm2,%edx # qhasm: in9 = diag3 # asm 1: movd in9=int32#4 # asm 2: movd in9=%ebx movd %xmm3,%ebx # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in10=int32#1 # asm 2: movd in10=%eax movd %xmm0,%eax # qhasm: in6 = diag1 # asm 1: movd in6=int32#2 # asm 2: movd in6=%ecx movd %xmm1,%ecx # qhasm: in2 = diag2 # asm 1: movd in2=int32#3 # asm 2: movd in2=%edx movd %xmm2,%edx # qhasm: in14 = diag3 # asm 1: movd in14=int32#4 # asm 2: movd in14=%ebx movd %xmm3,%ebx # qhasm: diag0 <<<= 96 # asm 1: pshufd $0x39,in15=int32#1 # asm 2: movd in15=%eax movd %xmm0,%eax # qhasm: in11 = diag1 # asm 1: movd in11=int32#2 # asm 2: movd in11=%ecx movd %xmm1,%ecx # qhasm: in7 = diag2 # asm 1: movd in7=int32#3 # asm 2: movd in7=%edx movd %xmm2,%edx # qhasm: in3 = diag3 # asm 1: movd in3=int32#4 # asm 2: movd in3=%ebx movd %xmm3,%ebx # qhasm: in15 ^= *(uint32 *) (m + 60) # asm 1: xorl 60(bytes=int32#1 # asm 2: movl bytes=%eax movl 24(%esp),%eax # qhasm: in8 = ((uint32 *)&x2)[0] # asm 1: movl in8=int32#2 # asm 2: movl in8=%ecx movl 80(%esp),%ecx # qhasm: in9 = ((uint32 *)&x3)[1] # asm 1: movl 4+in9=int32#3 # asm 2: movl 4+in9=%edx movl 4+32(%esp),%edx # qhasm: carry? in8 += 1 # asm 1: add $1,x2=stack128#4 # asm 2: movl x2=80(%esp) movl %ecx,80(%esp) # qhasm: ((uint32 *)&x3)[1] = in9 # asm 1: movl ? 
unsigned ja ._bytesatleast65 # comment:fp stack unchanged by jump # qhasm: goto bytesatleast64 if !unsigned< jae ._bytesatleast64 # qhasm: m = out # asm 1: mov m=int32#5 # asm 2: mov m=%esi mov %edi,%esi # qhasm: out = ctarget # asm 1: movl out=int32#6 # asm 2: movl out=%edi movl 20(%esp),%edi # qhasm: i = bytes # asm 1: mov i=int32#2 # asm 2: mov i=%ecx mov %eax,%ecx # qhasm: while (i) { *out++ = *m++; --i } rep movsb # comment:fp stack unchanged by fallthrough # qhasm: bytesatleast64: ._bytesatleast64: # comment:fp stack unchanged by fallthrough # qhasm: done: ._done: # qhasm: eax = eax_stack # asm 1: movl eax=int32#1 # asm 2: movl eax=%eax movl 0(%esp),%eax # qhasm: ebx = ebx_stack # asm 1: movl ebx=int32#4 # asm 2: movl ebx=%ebx movl 4(%esp),%ebx # qhasm: esi = esi_stack # asm 1: movl esi=int32#5 # asm 2: movl esi=%esi movl 8(%esp),%esi # qhasm: edi = edi_stack # asm 1: movl edi=int32#6 # asm 2: movl edi=%edi movl 12(%esp),%edi # qhasm: ebp = ebp_stack # asm 1: movl ebp=int32#7 # asm 2: movl ebp=%ebp movl 16(%esp),%ebp # qhasm: leave add %eax,%esp xor %eax,%eax ret # qhasm: bytesatleast65: ._bytesatleast65: # qhasm: bytes -= 64 # asm 1: sub $64, #include "crypto_stream.h" extern unsigned char *alignedcalloc(unsigned long long); const char *primitiveimplementation = crypto_stream_IMPLEMENTATION; #define MAXTEST_BYTES 10000 #define CHECKSUM_BYTES 4096 #define TUNE_BYTES 1536 static unsigned char *k; static unsigned char *n; static unsigned char *m; static unsigned char *c; static unsigned char *s; static unsigned char *k2; static unsigned char *n2; static unsigned char *m2; static unsigned char *c2; static unsigned char *s2; void preallocate(void) { } void allocate(void) { k = alignedcalloc(crypto_stream_KEYBYTES); n = alignedcalloc(crypto_stream_NONCEBYTES); m = alignedcalloc(MAXTEST_BYTES); c = alignedcalloc(MAXTEST_BYTES); s = alignedcalloc(MAXTEST_BYTES); k2 = alignedcalloc(crypto_stream_KEYBYTES); n2 = alignedcalloc(crypto_stream_NONCEBYTES); m2 = alignedcalloc(MAXTEST_BYTES); c2 = alignedcalloc(MAXTEST_BYTES); s2 = alignedcalloc(MAXTEST_BYTES); } void predoit(void) { } void doit(void) { crypto_stream_xor(c,m,TUNE_BYTES,n,k); } char checksum[crypto_stream_KEYBYTES * 2 + 1]; const char *checksum_compute(void) { long long i; long long j; for (i = 0;i < CHECKSUM_BYTES;++i) { long long mlen = i; long long clen = i; long long slen = i; long long klen = crypto_stream_KEYBYTES; long long nlen = crypto_stream_NONCEBYTES; for (j = -16;j < 0;++j) m[j] = random(); for (j = -16;j < 0;++j) c[j] = random(); for (j = -16;j < 0;++j) s[j] = random(); for (j = -16;j < 0;++j) n[j] = random(); for (j = -16;j < 0;++j) k[j] = random(); for (j = mlen;j < mlen + 16;++j) m[j] = random(); for (j = clen;j < clen + 16;++j) c[j] = random(); for (j = slen;j < slen + 16;++j) s[j] = random(); for (j = nlen;j < nlen + 16;++j) n[j] = random(); for (j = klen;j < klen + 16;++j) k[j] = random(); for (j = -16;j < mlen + 16;++j) m2[j] = m[j]; for (j = -16;j < clen + 16;++j) c2[j] = c[j]; for (j = -16;j < slen + 16;++j) s2[j] = s[j]; for (j = -16;j < nlen + 16;++j) n2[j] = n[j]; for (j = -16;j < klen + 16;++j) k2[j] = k[j]; crypto_stream_xor(c,m,mlen,n,k); for (j = -16;j < mlen + 16;++j) if (m[j] != m2[j]) return "crypto_stream_xor overwrites m"; for (j = -16;j < slen + 16;++j) if (s[j] != s2[j]) return "crypto_stream_xor overwrites s"; for (j = -16;j < nlen + 16;++j) if (n[j] != n2[j]) return "crypto_stream_xor overwrites n"; for (j = -16;j < klen + 16;++j) if (k[j] != k2[j]) return "crypto_stream_xor overwrites k"; for 
(j = -16;j < 0;++j) if (c[j] != c2[j]) return "crypto_stream_xor writes before output"; for (j = clen;j < clen + 16;++j) if (c[j] != c2[j]) return "crypto_stream_xor writes after output"; for (j = -16;j < clen + 16;++j) c2[j] = c[j]; crypto_stream(s,slen,n,k); for (j = -16;j < mlen + 16;++j) if (m[j] != m2[j]) return "crypto_stream overwrites m"; for (j = -16;j < clen + 16;++j) if (c[j] != c2[j]) return "crypto_stream overwrites c"; for (j = -16;j < nlen + 16;++j) if (n[j] != n2[j]) return "crypto_stream overwrites n"; for (j = -16;j < klen + 16;++j) if (k[j] != k2[j]) return "crypto_stream overwrites k"; for (j = -16;j < 0;++j) if (s[j] != s2[j]) return "crypto_stream writes before output"; for (j = slen;j < slen + 16;++j) if (s[j] != s2[j]) return "crypto_stream writes after output"; for (j = 0;j < mlen;++j) if ((s[j] ^ m[j]) != c[j]) return "crypto_stream_xor does not match crypto_stream"; for (j = 0;j < clen;++j) k[j % klen] ^= c[j]; crypto_stream_xor(m,c,clen,n,k); crypto_stream(s,slen,n,k); for (j = 0;j < mlen;++j) if ((s[j] ^ m[j]) != c[j]) return "crypto_stream_xor does not match crypto_stream"; for (j = 0;j < mlen;++j) n[j % nlen] ^= m[j]; m[mlen] = 0; } for (i = 0;i < crypto_stream_KEYBYTES;++i) { checksum[2 * i] = "0123456789abcdef"[15 & (k[i] >> 4)]; checksum[2 * i + 1] = "0123456789abcdef"[15 & k[i]]; } checksum[2 * i] = 0; return 0; } curvedns-curvedns-0.87/nacl/crypto_stream/wrapper-stream.cpp000066400000000000000000000006631150631715100244220ustar00rootroot00000000000000#include using std::string; #include "crypto_stream.h" string crypto_stream(size_t clen,const string &n,const string &k) { if (n.size() != crypto_stream_NONCEBYTES) throw "incorrect nonce length"; if (k.size() != crypto_stream_KEYBYTES) throw "incorrect key length"; unsigned char c[clen]; crypto_stream(c,clen,(const unsigned char *) n.c_str(),(const unsigned char *) k.c_str()); return string((char *) c,clen); } curvedns-curvedns-0.87/nacl/crypto_stream/wrapper-xor.cpp000066400000000000000000000010171150631715100237310ustar00rootroot00000000000000#include using std::string; #include "crypto_stream.h" string crypto_stream_xor(const string &m,const string &n,const string &k) { if (n.size() != crypto_stream_NONCEBYTES) throw "incorrect nonce length"; if (k.size() != crypto_stream_KEYBYTES) throw "incorrect key length"; size_t mlen = m.size(); unsigned char c[mlen]; crypto_stream_xor(c, (const unsigned char *) m.c_str(),mlen, (const unsigned char *) n.c_str(), (const unsigned char *) k.c_str() ); return string((char *) c,mlen); } curvedns-curvedns-0.87/nacl/crypto_stream/xsalsa20/000077500000000000000000000000001150631715100223755ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/xsalsa20/checksum000066400000000000000000000001011150631715100241120ustar00rootroot00000000000000201bc58a96adcb6ed339ca33c188af8ca04a4ce68be1e0953309ee09a0cf8e7a curvedns-curvedns-0.87/nacl/crypto_stream/xsalsa20/ref/000077500000000000000000000000001150631715100231515ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/xsalsa20/ref/api.h000066400000000000000000000000701150631715100240700ustar00rootroot00000000000000#define CRYPTO_KEYBYTES 32 #define CRYPTO_NONCEBYTES 24 curvedns-curvedns-0.87/nacl/crypto_stream/xsalsa20/ref/stream.c000066400000000000000000000007161150631715100246140ustar00rootroot00000000000000/* version 20080914 D. J. Bernstein Public domain. 
*/ #include "crypto_core_hsalsa20.h" #include "crypto_stream_salsa20.h" #include "crypto_stream.h" static const unsigned char sigma[16] = "expand 32-byte k"; int crypto_stream( unsigned char *c,unsigned long long clen, const unsigned char *n, const unsigned char *k ) { unsigned char subkey[32]; crypto_core_hsalsa20(subkey,n,k,sigma); return crypto_stream_salsa20(c,clen,n + 16,subkey); } curvedns-curvedns-0.87/nacl/crypto_stream/xsalsa20/ref/xor.c000066400000000000000000000007621150631715100241320ustar00rootroot00000000000000/* version 20080913 D. J. Bernstein Public domain. */ #include "crypto_core_hsalsa20.h" #include "crypto_stream_salsa20.h" #include "crypto_stream.h" static const unsigned char sigma[16] = "expand 32-byte k"; int crypto_stream_xor( unsigned char *c, const unsigned char *m,unsigned long long mlen, const unsigned char *n, const unsigned char *k ) { unsigned char subkey[32]; crypto_core_hsalsa20(subkey,n,k,sigma); return crypto_stream_salsa20_xor(c,m,mlen,n + 16,subkey); } curvedns-curvedns-0.87/nacl/crypto_stream/xsalsa20/selected000066400000000000000000000000001150631715100240760ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_stream/xsalsa20/used000066400000000000000000000000001150631715100232460ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_verify/000077500000000000000000000000001150631715100207515ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_verify/16/000077500000000000000000000000001150631715100211775ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_verify/16/checksum000066400000000000000000000000021150631715100227140ustar00rootroot000000000000000 curvedns-curvedns-0.87/nacl/crypto_verify/16/ref/000077500000000000000000000000001150631715100217535ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_verify/16/ref/api.h000066400000000000000000000000301150631715100226660ustar00rootroot00000000000000#define CRYPTO_BYTES 16 curvedns-curvedns-0.87/nacl/crypto_verify/16/ref/verify.c000066400000000000000000000005231150631715100234230ustar00rootroot00000000000000#include "crypto_verify.h" int crypto_verify(const unsigned char *x,const unsigned char *y) { unsigned int differentbits = 0; #define F(i) differentbits |= x[i] ^ y[i]; F(0) F(1) F(2) F(3) F(4) F(5) F(6) F(7) F(8) F(9) F(10) F(11) F(12) F(13) F(14) F(15) return (1 & ((differentbits - 1) >> 8)) - 1; } curvedns-curvedns-0.87/nacl/crypto_verify/16/used000066400000000000000000000000001150631715100220500ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_verify/32/000077500000000000000000000000001150631715100211755ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_verify/32/checksum000066400000000000000000000000021150631715100227120ustar00rootroot000000000000000 curvedns-curvedns-0.87/nacl/crypto_verify/32/ref/000077500000000000000000000000001150631715100217515ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_verify/32/ref/api.h000066400000000000000000000000301150631715100226640ustar00rootroot00000000000000#define CRYPTO_BYTES 32 curvedns-curvedns-0.87/nacl/crypto_verify/32/ref/verify.c000066400000000000000000000007231150631715100234230ustar00rootroot00000000000000#include "crypto_verify.h" int crypto_verify(const unsigned char *x,const unsigned char *y) { unsigned int differentbits = 0; #define F(i) differentbits |= x[i] ^ y[i]; F(0) F(1) F(2) F(3) F(4) F(5) F(6) F(7) F(8) F(9) F(10) F(11) F(12) F(13) F(14) F(15) F(16) F(17) F(18) F(19) F(20) F(21) F(22) F(23) F(24) F(25) F(26) F(27) F(28) F(29) F(30) F(31) 
return (1 & ((differentbits - 1) >> 8)) - 1; } curvedns-curvedns-0.87/nacl/crypto_verify/32/used000066400000000000000000000000001150631715100220460ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/crypto_verify/measure.c000066400000000000000000000005121150631715100225540ustar00rootroot00000000000000#include "crypto_verify.h" const char *primitiveimplementation = crypto_verify_IMPLEMENTATION; const char *implementationversion = crypto_verify_VERSION; const char *sizenames[] = { "inputbytes", 0 }; const long long sizes[] = { crypto_verify_BYTES }; void preallocate(void) { } void allocate(void) { } void measure(void) { } curvedns-curvedns-0.87/nacl/crypto_verify/try.c000066400000000000000000000030501150631715100217310ustar00rootroot00000000000000/* * crypto_verify/try.c version 20090118 * D. J. Bernstein * Public domain. */ #include #include "crypto_verify.h" extern unsigned char *alignedcalloc(unsigned long long); const char *primitiveimplementation = crypto_verify_IMPLEMENTATION; static unsigned char *x; static unsigned char *y; void preallocate(void) { } void allocate(void) { x = alignedcalloc(crypto_verify_BYTES); y = alignedcalloc(crypto_verify_BYTES); } void predoit(void) { } void doit(void) { crypto_verify(x,y); } static const char *check(void) { int r = crypto_verify(x,y); if (r == 0) { if (memcmp(x,y,crypto_verify_BYTES)) return "different strings pass verify"; } else if (r == -1) { if (!memcmp(x,y,crypto_verify_BYTES)) return "equal strings fail verify"; } else { return "weird return value from verify"; } return 0; } char checksum[2]; const char *checksum_compute(void) { long long tests; long long i; long long j; const char *c; for (tests = 0;tests < 100000;++tests) { for (i = 0;i < crypto_verify_BYTES;++i) x[i] = random(); for (i = 0;i < crypto_verify_BYTES;++i) y[i] = random(); c = check(); if (c) return c; for (i = 0;i < crypto_verify_BYTES;++i) y[i] = x[i]; c = check(); if (c) return c; y[random() % crypto_verify_BYTES] = random(); c = check(); if (c) return c; y[random() % crypto_verify_BYTES] = random(); c = check(); if (c) return c; y[random() % crypto_verify_BYTES] = random(); c = check(); if (c) return c; } checksum[0] = '0'; checksum[1] = 0; return 0; } curvedns-curvedns-0.87/nacl/crypto_verify/wrapper-empty.cpp000066400000000000000000000000001150631715100242570ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/inttypes/000077500000000000000000000000001150631715100177245ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/inttypes/crypto_int16.c000066400000000000000000000001041150631715100224240ustar00rootroot00000000000000#include "crypto_int16.h" #include "signed.h" DOIT(16,crypto_int16) curvedns-curvedns-0.87/nacl/inttypes/crypto_int32.c000066400000000000000000000001041150631715100224220ustar00rootroot00000000000000#include "crypto_int32.h" #include "signed.h" DOIT(32,crypto_int32) curvedns-curvedns-0.87/nacl/inttypes/crypto_int64.c000066400000000000000000000001041150631715100224270ustar00rootroot00000000000000#include "crypto_int64.h" #include "signed.h" DOIT(64,crypto_int64) curvedns-curvedns-0.87/nacl/inttypes/crypto_int8.c000066400000000000000000000001011150631715100223420ustar00rootroot00000000000000#include "crypto_int8.h" #include "signed.h" DOIT(8,crypto_int8) curvedns-curvedns-0.87/nacl/inttypes/crypto_uint16.c000066400000000000000000000001101150631715100226060ustar00rootroot00000000000000#include "crypto_uint16.h" #include "unsigned.h" DOIT(16,crypto_uint16) 
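The crypto_verify sources above implement the constant-time 16- and 32-byte comparisons that the higher-level primitives use to check authenticators; the return value is 0 on a match and -1 otherwise, computed without data-dependent branches. A minimal usage sketch follows, assuming the per-primitive header and function name (crypto_verify_16.h / crypto_verify_16) that the NaCl build installs; the tag values are placeholders for illustration, not test vectors.

/* Sketch: compare two 16-byte authenticator tags in constant time.
 * crypto_verify_16 inspects all 16 bytes regardless of where they differ,
 * so timing does not reveal how many leading bytes agree. */
#include <stdio.h>
#include "crypto_verify_16.h"

int main(void)
{
  unsigned char expected[16] = {0};  /* placeholder tag */
  unsigned char received[16] = {0};  /* placeholder tag */

  if (crypto_verify_16(expected, received) == 0)
    printf("tags match\n");
  else
    printf("tags differ\n");
  return 0;
}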
curvedns-curvedns-0.87/nacl/inttypes/crypto_uint32.c000066400000000000000000000001101150631715100226040ustar00rootroot00000000000000#include "crypto_uint32.h" #include "unsigned.h" DOIT(32,crypto_uint32) curvedns-curvedns-0.87/nacl/inttypes/crypto_uint64.c000066400000000000000000000001101150631715100226110ustar00rootroot00000000000000#include "crypto_uint64.h" #include "unsigned.h" DOIT(64,crypto_uint64) curvedns-curvedns-0.87/nacl/inttypes/crypto_uint8.c000066400000000000000000000001051150631715100225330ustar00rootroot00000000000000#include "crypto_uint8.h" #include "unsigned.h" DOIT(8,crypto_uint8) curvedns-curvedns-0.87/nacl/inttypes/do000066400000000000000000000024731150631715100202570ustar00rootroot00000000000000#!/bin/sh -e okabi | ( while read abi do ( echo 'int8 signed char' echo 'int16 short' echo 'int32 int' echo 'int32 long' echo 'int64 long long' echo 'int64 long' echo 'int64 int __attribute__((__mode__(__DI__)))' echo 'uint8 unsigned char' echo 'uint16 unsigned short' echo 'uint32 unsigned int' echo 'uint32 unsigned long' echo 'uint64 unsigned long long' echo 'uint64 unsigned long' echo 'uint64 unsigned int __attribute__((__mode__(__DI__)))' ) | ( while read target source do okc-$abi | ( while read c do [ -f include/$abi/crypto_$target.h ] && continue echo "=== `date` === $abi trying $source as $target under $c..." >&2 rm -f crypto_$target crypto_$target.h ( echo "#ifndef crypto_${target}_h" echo "#define crypto_${target}_h" echo "" echo "typedef ${source} crypto_${target};" echo "" echo "#endif" ) > crypto_$target.h $c -o crypto_$target crypto_$target.c || continue ./crypto_$target || continue mkdir -p include/$abi cp crypto_$target.h include/$abi/crypto_$target.h done ) done ) done ) curvedns-curvedns-0.87/nacl/inttypes/signed.h000066400000000000000000000003731150631715100213510ustar00rootroot00000000000000#define DOIT(bits,target) \ int main() \ { \ target x; \ int i; \ \ x = 1; \ for (i = 0;i < bits;++i) { \ if (x == 0) return 100; \ x += x; \ } \ if (x != 0) return 100; \ x -= 1; \ if (x > 0) return 100; \ \ return 0; \ } curvedns-curvedns-0.87/nacl/inttypes/unsigned.h000066400000000000000000000003731150631715100217140ustar00rootroot00000000000000#define DOIT(bits,target) \ int main() \ { \ target x; \ int i; \ \ x = 1; \ for (i = 0;i < bits;++i) { \ if (x == 0) return 100; \ x += x; \ } \ if (x != 0) return 100; \ x -= 1; \ if (x < 0) return 100; \ \ return 0; \ } curvedns-curvedns-0.87/nacl/measure-anything.c000066400000000000000000000113671150631715100215010ustar00rootroot00000000000000/* * measure-anything.c version 20090223 * D. J. Bernstein * Public domain. 
*/ #include #include #include #include #include #include #include #include "cpucycles.h" #include "cpuid.h" typedef int uint32; static uint32 seed[32] = { 3,1,4,1,5,9,2,6,5,3,5,8,9,7,9,3,2,3,8,4,6,2,6,4,3,3,8,3,2,7,9,5 } ; static uint32 in[12]; static uint32 out[8]; static int outleft = 0; #define ROTATE(x,b) (((x) << (b)) | ((x) >> (32 - (b)))) #define MUSH(i,b) x = t[i] += (((x ^ seed[i]) + sum) ^ ROTATE(x,b)); static void surf(void) { uint32 t[12]; uint32 x; uint32 sum = 0; int r; int i; int loop; for (i = 0;i < 12;++i) t[i] = in[i] ^ seed[12 + i]; for (i = 0;i < 8;++i) out[i] = seed[24 + i]; x = t[11]; for (loop = 0;loop < 2;++loop) { for (r = 0;r < 16;++r) { sum += 0x9e3779b9; MUSH(0,5) MUSH(1,7) MUSH(2,9) MUSH(3,13) MUSH(4,5) MUSH(5,7) MUSH(6,9) MUSH(7,13) MUSH(8,5) MUSH(9,7) MUSH(10,9) MUSH(11,13) } for (i = 0;i < 8;++i) out[i] ^= t[i + 4]; } } void randombytes(unsigned char *x,unsigned long long xlen) { while (xlen > 0) { if (!outleft) { if (!++in[0]) if (!++in[1]) if (!++in[2]) ++in[3]; surf(); outleft = 8; } *x = out[--outleft]; ++x; --xlen; } } extern const char *primitiveimplementation; extern const char *implementationversion; extern const char *sizenames[]; extern const long long sizes[]; extern void preallocate(void); extern void allocate(void); extern void measure(void); static void printword(const char *s) { if (!*s) putchar('-'); while (*s) { if (*s == ' ') putchar('_'); else if (*s == '\t') putchar('_'); else if (*s == '\r') putchar('_'); else if (*s == '\n') putchar('_'); else putchar(*s); ++s; } putchar(' '); } static void printnum(long long x) { printf("%lld ",x); } static void fail(const char *why) { fprintf(stderr,"measure: fatal: %s\n",why); exit(111); } unsigned char *alignedcalloc(unsigned long long len) { unsigned char *x = (unsigned char *) calloc(1,len + 128); if (!x) fail("out of memory"); /* will never deallocate so shifting is ok */ x += 63 & (-(unsigned long) x); return x; } static long long cyclespersecond; static void printimplementations(void) { int i; printword("implementation"); printword(primitiveimplementation); printword(implementationversion); printf("\n"); fflush(stdout); for (i = 0;sizenames[i];++i) { printword(sizenames[i]); printnum(sizes[i]); printf("\n"); fflush(stdout); } printword("cpuid"); printword(cpuid); printf("\n"); fflush(stdout); printword("cpucycles_persecond"); printnum(cyclespersecond); printf("\n"); fflush(stdout); printword("cpucycles_implementation"); printword(cpucycles_implementation); printf("\n"); fflush(stdout); printword("compiler"); printword(COMPILER); #if defined(__VERSION__) && !defined(__ICC) printword(__VERSION__); #elif defined(__xlc__) printword(__xlc__); #elif defined(__ICC) { char buf[256]; sprintf(buf, "%d.%d.%d", __ICC/100, __ICC%100, __INTEL_COMPILER_BUILD_DATE); printword(buf); } #elif defined(__PGIC__) { char buf[256]; sprintf(buf, "%d.%d.%d", __PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__); printword(buf); } #elif defined(__SUNPRO_C) { char buf[256]; int major, minor, micro; micro = __SUNPRO_C & 0xf; minor = (__SUNPRO_C >> 4) & 0xf; major = (__SUNPRO_C >> 8) & 0xf; if (micro) sprintf(buf, "%d.%d.%d", major, minor, micro); else sprintf(buf, "%d.%d", major, minor); printword(buf); } #else printword("unknown compiler version"); #endif printf("\n"); fflush(stdout); } void printentry(long long mbytes,const char *measuring,long long *m,long long mlen) { long long i; long long j; long long belowj; long long abovej; printword(measuring); if (mbytes >= 0) printnum(mbytes); else printword(""); if (mlen > 0) { for 
(j = 0;j + 1 < mlen;++j) { belowj = 0; for (i = 0;i < mlen;++i) if (m[i] < m[j]) ++belowj; abovej = 0; for (i = 0;i < mlen;++i) if (m[i] > m[j]) ++abovej; if (belowj * 2 < mlen && abovej * 2 < mlen) break; } printnum(m[j]); if (mlen > 1) { for (i = 0;i < mlen;++i) printnum(m[i]); } } printf("\n"); fflush(stdout); } void limits() { #ifdef RLIM_INFINITY struct rlimit r; r.rlim_cur = 0; r.rlim_max = 0; #ifdef RLIMIT_NOFILE setrlimit(RLIMIT_NOFILE,&r); #endif #ifdef RLIMIT_NPROC setrlimit(RLIMIT_NPROC,&r); #endif #ifdef RLIMIT_CORE setrlimit(RLIMIT_CORE,&r); #endif #endif } int main() { cyclespersecond = cpucycles_persecond(); preallocate(); limits(); printimplementations(); allocate(); measure(); return 0; } curvedns-curvedns-0.87/nacl/okcompilers/000077500000000000000000000000001150631715100203745ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/okcompilers/abiname.c000066400000000000000000000023731150631715100221410ustar00rootroot00000000000000#include const char *abi(void) { #if defined(__amd64__) || defined(__x86_64__) || defined(__AMD64__) || defined(_M_X64) || defined(__amd64) return "amd64"; #elif defined(__i386__) || defined(__x86__) || defined(__X86__) || defined(_M_IX86) || defined(__i386) return "x86"; #elif defined(__ia64__) || defined(__IA64__) || defined(__M_IA64) return "ia64"; #elif defined(__SPU__) return "cellspu"; #elif defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) || defined(_ARCH_PPC64) return "ppc64"; #elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(_ARCH_PPC) return "ppc32"; #elif defined(__sparcv9__) || defined(__sparcv9) return "sparcv9"; #elif defined(__sparc_v8__) return "sparcv8"; #elif defined(__sparc__) || defined(__sparc) if (sizeof(long) == 4) return "sparcv8"; return "sparcv9"; #elif defined(__ARM_EABI__) return "armeabi"; #elif defined(__arm__) return "arm"; #elif defined(__mips__) || defined(__mips) || defined(__MIPS__) # if defined(_ABIO32) return "mipso32"; # elif defined(_ABIN32) return "mips32"; # else return "mips64"; # endif #else return "default"; #endif } int main(int argc,char **argv) { printf("%s %s\n",argv[1],abi()); return 0; } curvedns-curvedns-0.87/nacl/okcompilers/archivers000066400000000000000000000000131150631715100222770ustar00rootroot00000000000000ar ar -X64 curvedns-curvedns-0.87/nacl/okcompilers/c000066400000000000000000000006301150631715100205400ustar00rootroot00000000000000gcc -m64 -fomit-frame-pointer gcc -m64 -O -fomit-frame-pointer gcc -m64 -O3 -fomit-frame-pointer -funroll-loops gcc -m32 -fomit-frame-pointer gcc -m32 -O -fomit-frame-pointer gcc -m32 -O3 -fomit-frame-pointer -funroll-loops spu-gcc -mstdmain -march=cell -O -fomit-frame-pointer -Drandom=rand -Dsrandom=srand spu-gcc -mstdmain -march=cell -O3 -funroll-loops -fomit-frame-pointer -Drandom=rand -Dsrandom=srand curvedns-curvedns-0.87/nacl/okcompilers/cpp000066400000000000000000000006301150631715100211000ustar00rootroot00000000000000g++ -m64 -fomit-frame-pointer g++ -m64 -O -fomit-frame-pointer g++ -m64 -O3 -fomit-frame-pointer -funroll-loops g++ -m32 -fomit-frame-pointer g++ -m32 -O -fomit-frame-pointer g++ -m32 -O3 -fomit-frame-pointer -funroll-loops spu-g++ -mstdmain -march=cell -O -fomit-frame-pointer -Drandom=rand -Dsrandom=srand spu-g++ -mstdmain -march=cell -O3 -funroll-loops -fomit-frame-pointer -Drandom=rand -Dsrandom=srand curvedns-curvedns-0.87/nacl/okcompilers/do000077500000000000000000000107501150631715100207270ustar00rootroot00000000000000#!/bin/sh -e mkdir oldbin mkdir bin for language in c cpp do exec 
<$language exec 9>${language}-works while read c options do echo "=== `date` === checking $c $options" >&2 rm -f test* ( echo "#!/bin/sh" echo 'PATH="'"$PATH"'"' echo 'export PATH' echo "$c" "$options" '"$@"' ) > test-okc chmod 755 test-okc cat lib.c main.c > test.$language || continue ./test-okc -o test test.$language || continue ./test || continue cp main.c test1.$language || continue cp lib.c test2.$language || continue ./test-okc -c test1.$language || continue ./test-okc -c test2.$language || continue ./test-okc -o test1 test1.o test2.o || continue ./test1 || continue echo "=== `date` === success: $c $options is ok" echo "$c $options" >&9 done mv ${language}-works $language done exec oldbin/okabi chmod 755 oldbin/okabi echo "#!/bin/sh" >&7 while : do exec ${language}-compatible exec 9>${language}-incompatible echo "=== `date` === checking compatibility with $c $options" >&2 exec <$language while read c2 options2 do echo "=== `date` === checking $c2 $options2" >&2 works=1 rm -f test* ( echo "#!/bin/sh" echo 'PATH="'"$PATH"'"' echo 'export PATH' echo "$c" "$options" '"$@"' ) > test-okc chmod 755 test-okc ( echo "#!/bin/sh" echo 'PATH="'"$PATH"'"' echo 'export PATH' echo "$c2" "$options2" '"$@"' ) > test-okc2 chmod 755 test-okc2 if cp main.c test5.c \ && cp main.cpp test5.cpp \ && cp lib.c test6.c \ && ./test-okc2 -c test5.$language \ && ./test-okc -c test6.c \ && ./test-okc2 -o test5 test5.o test6.o \ && ./test5 then echo "=== `date` === success: $c2 $options2 is compatible" >&2 echo "$c2 $options2" >&8 else echo "$c2 $options2" >&9 fi done done abi=`awk '{print length($0),$0}' < c-compatible \ | sort -n | head -1 | sed 's/ *$//' | sed 's/^[^ ]* //' | tr ' /' '__'` echo "echo '"$abi"'" >&7 syslibs="" for i in -lm -lnsl -lsocket do echo "=== `date` === checking $i" >&2 ( echo "#!/bin/sh" echo 'PATH="'"$PATH"'"' echo 'export PATH' echo "$c" "$options" '"$@"' "$i" "$syslibs" ) > test-okclink chmod 755 test-okclink cat lib.c main.c > test.c || continue ./test-okclink -o test test.c $i $syslibs || continue ./test || continue syslibs="$i $syslibs" ( echo '#!/bin/sh' echo 'echo "'"$syslibs"'"' ) > "oldbin/oklibs-$abi" chmod 755 "oldbin/oklibs-$abi" done foundokar=0 exec &2 ( echo "#!/bin/sh" echo 'PATH="'"$PATH"'"' echo 'export PATH' echo "$a" '"$@"' ) > test-okar chmod 755 test-okar cp main.c test9.c || continue cp lib.c test10.c || continue ./test-okc -c test10.c || continue ./test-okar cr test10.a test10.o || continue ranlib test10.a || echo "=== `date` === no ranlib; continuing anyway" >&2 ./test-okc -o test9 test9.c test10.a || continue ./test9 || continue cp -p test-okar "oldbin/okar-$abi" echo "=== `date` === success: archiver $a is ok" >&2 foundokar=1 break done case $foundokar in 0) echo "=== `date` === giving up; no archivers work" >&2 exit 111 ;; esac for language in c cpp do mv ${language}-incompatible ${language} exec <${language}-compatible exec 9>"oldbin/ok${language}-$abi" chmod 755 "oldbin/ok${language}-$abi" echo "#!/bin/sh" >&9 while read c2 options2 do echo "echo '"$c2 $options2"'" >&9 done done done exec 7>/dev/null oldbin/okabi \ | while read abi do oldbin/okc-$abi \ | head -1 \ | while read c do $c -o abiname abiname.c \ && ./abiname "$abi" done done > abinames numabinames=`awk '{print $2}' < abinames | sort -u | wc -l` numabis=`oldbin/okabi | wc -l` if [ "$numabis" = "$numabinames" ] then exec bin/okabi chmod 755 bin/okabi echo '#!/bin/sh' >&7 while read oldabi newabi do mv "oldbin/okc-$oldabi" "bin/okc-$newabi" mv "oldbin/okcpp-$oldabi" "bin/okcpp-$newabi" mv 
"oldbin/okar-$oldabi" "bin/okar-$newabi" mv "oldbin/oklibs-$oldabi" "bin/oklibs-$newabi" echo "echo $newabi" >&7 done else cp -p oldbin/* bin fi curvedns-curvedns-0.87/nacl/okcompilers/lib.c000066400000000000000000000004131150631715100213040ustar00rootroot00000000000000int not3(int n) { return n != 3; } int bytes(int n) { return (n + 7) / 8; } long long shr32(long long n) { return n >> 32; } double double5(void) { return 5.0; } int intbytes(void) { return sizeof(int); } int longbytes(void) { return sizeof(long); } curvedns-curvedns-0.87/nacl/okcompilers/lib.cpp000066400000000000000000000002551150631715100216500ustar00rootroot00000000000000int not3(int n) { return n != 3; } int bytes(int n) { return (n + 7) / 8; } long long shr32(long long n) { return n >> 32; } double double5(void) { return 5.0; } curvedns-curvedns-0.87/nacl/okcompilers/main.c000066400000000000000000000012161150631715100214640ustar00rootroot00000000000000extern int not3(int); extern int bytes(int); extern long long shr32(long long); extern double double5(void); extern int longbytes(void); extern int intbytes(void); int main(int argc,char **argv) { if (intbytes() != sizeof(int)) return 100; if (longbytes() != sizeof(long)) return 100; if (not3(3)) return 100; /* on ppc32, gcc -mpowerpc64 produces SIGILL for >>32 */ if (!not3(shr32(1))) return 100; /* on pentium 1, gcc -march=pentium2 produces SIGILL for (...+7)/8 */ if (bytes(not3(1)) != 1) return 100; /* on pentium 1, gcc -march=prescott produces SIGILL for double comparison */ if (double5() < 0) return 100; return 0; } curvedns-curvedns-0.87/nacl/okcompilers/main.cpp000066400000000000000000000010211150631715100220160ustar00rootroot00000000000000extern "C" { extern int not3(int); extern int bytes(int); extern long long shr32(long long); extern double double5(void); } int main(int argc,char **argv) { if (not3(3)) return 100; /* on ppc32, gcc -mpowerpc64 produces SIGILL for >>32 */ if (!not3(shr32(1))) return 100; /* on pentium 1, gcc -march=pentium2 produces SIGILL for (...+7)/8 */ if (bytes(not3(1)) != 1) return 100; /* on pentium 1, gcc -march=prescott produces SIGILL for double comparison */ if (double5() < 0) return 100; return 0; } curvedns-curvedns-0.87/nacl/randombytes/000077500000000000000000000000001150631715100203745ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/randombytes/devurandom.c000066400000000000000000000010511150631715100227010ustar00rootroot00000000000000#include #include #include #include /* it's really stupid that there isn't a syscall for this */ static int fd = -1; void randombytes(unsigned char *x,unsigned long long xlen) { int i; if (fd == -1) { for (;;) { fd = open("/dev/urandom",O_RDONLY); if (fd != -1) break; sleep(1); } } while (xlen > 0) { if (xlen < 1048576) i = xlen; else i = 1048576; i = read(fd,x,i); if (i < 1) { sleep(1); continue; } x += i; xlen -= i; } } curvedns-curvedns-0.87/nacl/randombytes/devurandom.h000066400000000000000000000005701150631715100227130ustar00rootroot00000000000000/* randombytes/devurandom.h version 20080713 D. J. Bernstein Public domain. 
*/ #ifndef randombytes_devurandom_H #define randombytes_devurandom_H #ifdef __cplusplus extern "C" { #endif extern void randombytes(unsigned char *,unsigned long long); #ifdef __cplusplus } #endif #ifndef randombytes_implementation #define randombytes_implementation "devurandom" #endif #endif curvedns-curvedns-0.87/nacl/randombytes/do000066400000000000000000000021361150631715100207230ustar00rootroot00000000000000#!/bin/sh -e okabi | ( while read abi do rm -f randombytes.o randombytes.h ( echo devurandom ) | ( while read n do okc-$abi | ( while read c do echo "=== `date` === Trying $n.c with $c..." >&2 rm -f test randombytes-impl.o randombytes-impl.h randombytes-impl.c cp $n.c randombytes-impl.c || continue cp $n.h randombytes-impl.h || continue $c -c randombytes-impl.c || continue $c -o test test.c randombytes-impl.o || continue ./test || continue echo "=== `date` === Success. Using $n.c." >&2 mkdir -p lib/$abi mv randombytes-impl.o lib/$abi/randombytes.o mkdir -p include/$abi mv randombytes-impl.h include/$abi/randombytes.h exit 0 done exit 111 ) && exit 0 done exit 111 ) || ( echo ===== Giving up. >&2 rm -f test randombytes-impl.o randombytes-impl.h randombytes-impl.c exit 111 ) || exit 111 done exit 0 ) || exit 111 curvedns-curvedns-0.87/nacl/randombytes/test.c000066400000000000000000000004711150631715100215210ustar00rootroot00000000000000#include "randombytes-impl.h" unsigned char x[65536]; unsigned long long freq[256]; int main() { unsigned long long i; randombytes(x,sizeof x); for (i = 0;i < 256;++i) freq[i] = 0; for (i = 0;i < sizeof x;++i) ++freq[255 & (int) x[i]]; for (i = 0;i < 256;++i) if (!freq[i]) return 111; return 0; } curvedns-curvedns-0.87/nacl/tests/000077500000000000000000000000001150631715100172075ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/auth.c000066400000000000000000000006051150631715100203150ustar00rootroot00000000000000#include #include "crypto_auth_hmacsha512256.h" /* "Test Case 2" from RFC 4231 */ unsigned char key[32] = "Jefe"; unsigned char c[28] = "what do ya want for nothing?"; unsigned char a[32]; main() { int i; crypto_auth_hmacsha512256(a,c,sizeof c,key); for (i = 0;i < 32;++i) { printf(",0x%02x",(unsigned int) a[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/auth.out000066400000000000000000000002441150631715100207010ustar00rootroot00000000000000,0x16,0x4b,0x7a,0x7b,0xfc,0xf8,0x19,0xe2 ,0xe3,0x95,0xfb,0xe7,0x3b,0x56,0xe0,0xa3 ,0x87,0xbd,0x64,0x22,0x2e,0x83,0x1f,0xd6 ,0x10,0x27,0x0c,0xd7,0xea,0x25,0x05,0x54 curvedns-curvedns-0.87/nacl/tests/auth2.c000066400000000000000000000014221150631715100203750ustar00rootroot00000000000000/* "Test Case AUTH256-4" from RFC 4868 */ #include #include "crypto_auth_hmacsha256.h" unsigned char key[32] = { 0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08 ,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10 ,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18 ,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,0x20 } ; unsigned char c[50] = { 0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd } ; unsigned char a[32]; main() { int i; crypto_auth_hmacsha256(a,c,sizeof c,key); for (i = 0;i < 32;++i) { printf(",0x%02x",(unsigned int) a[i]); if (i % 8 == 7) printf("\n"); } return 0; } 
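The auth tests above and below check crypto_auth_hmacsha256 and crypto_auth_hmacsha512256 against the RFC 4231 and RFC 4868 vectors. A minimal round-trip sketch of the same C API, assuming the standard crypto_auth_hmacsha256.h header and its KEYBYTES/BYTES constants; the key and message here are illustrative placeholders rather than published vectors.

/* Sketch: authenticate a message and verify the tag.
 * crypto_auth_hmacsha256 writes a 32-byte tag; the _verify call returns 0
 * on success and -1 if the tag does not match the message under the key. */
#include <stdio.h>
#include <string.h>
#include "crypto_auth_hmacsha256.h"

int main(void)
{
  unsigned char k[crypto_auth_hmacsha256_KEYBYTES];
  unsigned char m[12] = "hello world";
  unsigned char a[crypto_auth_hmacsha256_BYTES];

  memset(k, 0x42, sizeof k);                      /* placeholder key */
  crypto_auth_hmacsha256(a, m, sizeof m, k);      /* produce the tag */
  if (crypto_auth_hmacsha256_verify(a, m, sizeof m, k) != 0) {
    printf("verification failed\n");
    return 1;
  }
  printf("tag verified\n");
  return 0;
}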
curvedns-curvedns-0.87/nacl/tests/auth2.out000066400000000000000000000002441150631715100207630ustar00rootroot00000000000000,0x37,0x2e,0xfc,0xf9,0xb4,0x0b,0x35,0xc2 ,0x11,0x5b,0x13,0x46,0x90,0x3d,0x2e,0xf4 ,0x2f,0xce,0xd4,0x6f,0x08,0x46,0xe7,0x25 ,0x7b,0xb1,0x56,0xd3,0xd7,0xb3,0x0d,0x3f curvedns-curvedns-0.87/nacl/tests/auth3.c000066400000000000000000000015361150631715100204040ustar00rootroot00000000000000/* "Test Case AUTH256-4" from RFC 4868 */ #include #include "crypto_auth_hmacsha256.h" unsigned char key[32] = { 0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08 ,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10 ,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18 ,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,0x20 } ; unsigned char c[50] = { 0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd } ; unsigned char a[32] = { 0x37,0x2e,0xfc,0xf9,0xb4,0x0b,0x35,0xc2 ,0x11,0x5b,0x13,0x46,0x90,0x3d,0x2e,0xf4 ,0x2f,0xce,0xd4,0x6f,0x08,0x46,0xe7,0x25 ,0x7b,0xb1,0x56,0xd3,0xd7,0xb3,0x0d,0x3f } ; main() { printf("%d\n",crypto_auth_hmacsha256_verify(a,c,sizeof c,key)); return 0; } curvedns-curvedns-0.87/nacl/tests/auth3.out000066400000000000000000000000021150631715100207540ustar00rootroot000000000000000 curvedns-curvedns-0.87/nacl/tests/auth4.cpp000066400000000000000000000020461150631715100207420ustar00rootroot00000000000000/* "Test Case AUTH256-4" from RFC 4868 */ #include using std::string; #include #include "crypto_auth_hmacsha256.h" char key_bytes[32] = { 0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08 ,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10 ,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18 ,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,0x20 } ; char c_bytes[50] = { 0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd,0xcd ,0xcd,0xcd } ; char a_bytes[32] = { 0x37,0x2e,0xfc,0xf9,0xb4,0x0b,0x35,0xc2 ,0x11,0x5b,0x13,0x46,0x90,0x3d,0x2e,0xf4 ,0x2f,0xce,0xd4,0x6f,0x08,0x46,0xe7,0x25 ,0x7b,0xb1,0x56,0xd3,0xd7,0xb3,0x0d,0x3f } ; main() { string key(key_bytes,sizeof key_bytes); string c(c_bytes,sizeof c_bytes); string a(a_bytes,sizeof a_bytes); try { crypto_auth_hmacsha256_verify(a,c,key); printf("0\n"); } catch(const char *s) { printf("%s\n",s); } return 0; } curvedns-curvedns-0.87/nacl/tests/auth4.out000066400000000000000000000000021150631715100207550ustar00rootroot000000000000000 curvedns-curvedns-0.87/nacl/tests/auth5.c000066400000000000000000000015501150631715100204020ustar00rootroot00000000000000#include #include #include "crypto_auth_hmacsha512256.h" #include "randombytes.h" unsigned char key[32]; unsigned char c[10000]; unsigned char a[32]; main() { int clen; int i; for (clen = 0;clen < 10000;++clen) { randombytes(key,sizeof key); randombytes(c,clen); crypto_auth_hmacsha512256(a,c,clen,key); if (crypto_auth_hmacsha512256_verify(a,c,clen,key) != 0) { printf("fail %d\n",clen); return 100; } if (clen > 0) { c[random() % clen] += 1 + (random() % 255); if (crypto_auth_hmacsha512256_verify(a,c,clen,key) == 0) { printf("forgery %d\n",clen); return 100; } a[random() % sizeof a] += 1 + (random() % 255); if (crypto_auth_hmacsha512256_verify(a,c,clen,key) == 0) { printf("forgery %d\n",clen); return 100; } } } return 0; } 
curvedns-curvedns-0.87/nacl/tests/auth5.out000066400000000000000000000000001150631715100207540ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/auth6.cpp000066400000000000000000000022211150631715100207370ustar00rootroot00000000000000#include using std::string; #include #include #include "crypto_auth_hmacsha512256.h" #include "randombytes.h" main() { int clen; int i; for (clen = 0;clen < 10000;++clen) { unsigned char key_bytes[32]; randombytes(key_bytes,sizeof key_bytes); string key((char *) key_bytes,sizeof key_bytes); unsigned char c_bytes[clen]; randombytes(c_bytes,sizeof c_bytes); string c((char *) c_bytes,sizeof c_bytes); string a = crypto_auth_hmacsha512256(c,key); try { crypto_auth_hmacsha512256_verify(a,c,key); } catch(const char *s) { printf("fail %d %s\n",clen,s); return 100; } if (clen > 0) { size_t pos = random() % clen; c.replace(pos,1,1,c[pos] + 1 + (random() % 255)); try { crypto_auth_hmacsha512256_verify(a,c,key); printf("forgery %d\n",clen); } catch(const char *s) { ; } pos = random() % a.size(); a.replace(pos,1,1,a[pos] + 1 + (random() % 255)); try { crypto_auth_hmacsha512256_verify(a,c,key); printf("forgery %d\n",clen); } catch(const char *s) { ; } } } return 0; } curvedns-curvedns-0.87/nacl/tests/auth6.out000066400000000000000000000000001150631715100207550ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/box.c000066400000000000000000000033731150631715100201510ustar00rootroot00000000000000#include #include "crypto_box_curve25519xsalsa20poly1305.h" unsigned char alicesk[32] = { 0x77,0x07,0x6d,0x0a,0x73,0x18,0xa5,0x7d ,0x3c,0x16,0xc1,0x72,0x51,0xb2,0x66,0x45 ,0xdf,0x4c,0x2f,0x87,0xeb,0xc0,0x99,0x2a ,0xb1,0x77,0xfb,0xa5,0x1d,0xb9,0x2c,0x2a } ; unsigned char bobpk[32] = { 0xde,0x9e,0xdb,0x7d,0x7b,0x7d,0xc1,0xb4 ,0xd3,0x5b,0x61,0xc2,0xec,0xe4,0x35,0x37 ,0x3f,0x83,0x43,0xc8,0x5b,0x78,0x67,0x4d ,0xad,0xfc,0x7e,0x14,0x6f,0x88,0x2b,0x4f } ; unsigned char nonce[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; // API requires first 32 bytes to be 0 unsigned char m[163] = { 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 ,0xbe,0x07,0x5f,0xc5,0x3c,0x81,0xf2,0xd5 ,0xcf,0x14,0x13,0x16,0xeb,0xeb,0x0c,0x7b ,0x52,0x28,0xc5,0x2a,0x4c,0x62,0xcb,0xd4 ,0x4b,0x66,0x84,0x9b,0x64,0x24,0x4f,0xfc ,0xe5,0xec,0xba,0xaf,0x33,0xbd,0x75,0x1a ,0x1a,0xc7,0x28,0xd4,0x5e,0x6c,0x61,0x29 ,0x6c,0xdc,0x3c,0x01,0x23,0x35,0x61,0xf4 ,0x1d,0xb6,0x6c,0xce,0x31,0x4a,0xdb,0x31 ,0x0e,0x3b,0xe8,0x25,0x0c,0x46,0xf0,0x6d ,0xce,0xea,0x3a,0x7f,0xa1,0x34,0x80,0x57 ,0xe2,0xf6,0x55,0x6a,0xd6,0xb1,0x31,0x8a ,0x02,0x4a,0x83,0x8f,0x21,0xaf,0x1f,0xde ,0x04,0x89,0x77,0xeb,0x48,0xf5,0x9f,0xfd ,0x49,0x24,0xca,0x1c,0x60,0x90,0x2e,0x52 ,0xf0,0xa0,0x89,0xbc,0x76,0x89,0x70,0x40 ,0xe0,0x82,0xf9,0x37,0x76,0x38,0x48,0x64 ,0x5e,0x07,0x05 } ; unsigned char c[163]; main() { int i; crypto_box_curve25519xsalsa20poly1305( c,m,163,nonce,bobpk,alicesk ); for (i = 16;i < 163;++i) { printf(",0x%02x",(unsigned int) c[i]); if (i % 8 == 7) printf("\n"); } printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/box.out000066400000000000000000000013621150631715100205320ustar00rootroot00000000000000,0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 ,0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b 
,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 curvedns-curvedns-0.87/nacl/tests/box2.c000066400000000000000000000034371150631715100202340ustar00rootroot00000000000000#include #include "crypto_box_curve25519xsalsa20poly1305.h" unsigned char bobsk[32] = { 0x5d,0xab,0x08,0x7e,0x62,0x4a,0x8a,0x4b ,0x79,0xe1,0x7f,0x8b,0x83,0x80,0x0e,0xe6 ,0x6f,0x3b,0xb1,0x29,0x26,0x18,0xb6,0xfd ,0x1c,0x2f,0x8b,0x27,0xff,0x88,0xe0,0xeb } ; unsigned char alicepk[32] = { 0x85,0x20,0xf0,0x09,0x89,0x30,0xa7,0x54 ,0x74,0x8b,0x7d,0xdc,0xb4,0x3e,0xf7,0x5a ,0x0d,0xbf,0x3a,0x0d,0x26,0x38,0x1a,0xf4 ,0xeb,0xa4,0xa9,0x8e,0xaa,0x9b,0x4e,0x6a } ; unsigned char nonce[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; // API requires first 16 bytes to be 0 unsigned char c[163] = { 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 ,0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 ,0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 } ; unsigned char m[163]; main() { int i; if (crypto_box_curve25519xsalsa20poly1305_open( m,c,163,nonce,alicepk,bobsk ) == 0) { for (i = 32;i < 163;++i) { printf(",0x%02x",(unsigned int) m[i]); if (i % 8 == 7) printf("\n"); } printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/box2.out000066400000000000000000000012401150631715100206070ustar00rootroot00000000000000,0xbe,0x07,0x5f,0xc5,0x3c,0x81,0xf2,0xd5 ,0xcf,0x14,0x13,0x16,0xeb,0xeb,0x0c,0x7b ,0x52,0x28,0xc5,0x2a,0x4c,0x62,0xcb,0xd4 ,0x4b,0x66,0x84,0x9b,0x64,0x24,0x4f,0xfc ,0xe5,0xec,0xba,0xaf,0x33,0xbd,0x75,0x1a ,0x1a,0xc7,0x28,0xd4,0x5e,0x6c,0x61,0x29 ,0x6c,0xdc,0x3c,0x01,0x23,0x35,0x61,0xf4 ,0x1d,0xb6,0x6c,0xce,0x31,0x4a,0xdb,0x31 ,0x0e,0x3b,0xe8,0x25,0x0c,0x46,0xf0,0x6d ,0xce,0xea,0x3a,0x7f,0xa1,0x34,0x80,0x57 ,0xe2,0xf6,0x55,0x6a,0xd6,0xb1,0x31,0x8a ,0x02,0x4a,0x83,0x8f,0x21,0xaf,0x1f,0xde ,0x04,0x89,0x77,0xeb,0x48,0xf5,0x9f,0xfd ,0x49,0x24,0xca,0x1c,0x60,0x90,0x2e,0x52 ,0xf0,0xa0,0x89,0xbc,0x76,0x89,0x70,0x40 ,0xe0,0x82,0xf9,0x37,0x76,0x38,0x48,0x64 ,0x5e,0x07,0x05 curvedns-curvedns-0.87/nacl/tests/box3.cpp000066400000000000000000000033751150631715100205760ustar00rootroot00000000000000#include using std::string; #include #include "crypto_box_curve25519xsalsa20poly1305.h" char alicesk_bytes[32] = { 0x77,0x07,0x6d,0x0a,0x73,0x18,0xa5,0x7d ,0x3c,0x16,0xc1,0x72,0x51,0xb2,0x66,0x45 ,0xdf,0x4c,0x2f,0x87,0xeb,0xc0,0x99,0x2a ,0xb1,0x77,0xfb,0xa5,0x1d,0xb9,0x2c,0x2a } ; char bobpk_bytes[32] = { 
0xde,0x9e,0xdb,0x7d,0x7b,0x7d,0xc1,0xb4 ,0xd3,0x5b,0x61,0xc2,0xec,0xe4,0x35,0x37 ,0x3f,0x83,0x43,0xc8,0x5b,0x78,0x67,0x4d ,0xad,0xfc,0x7e,0x14,0x6f,0x88,0x2b,0x4f } ; char nonce_bytes[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; char m_bytes[131] = { 0xbe,0x07,0x5f,0xc5,0x3c,0x81,0xf2,0xd5 ,0xcf,0x14,0x13,0x16,0xeb,0xeb,0x0c,0x7b ,0x52,0x28,0xc5,0x2a,0x4c,0x62,0xcb,0xd4 ,0x4b,0x66,0x84,0x9b,0x64,0x24,0x4f,0xfc ,0xe5,0xec,0xba,0xaf,0x33,0xbd,0x75,0x1a ,0x1a,0xc7,0x28,0xd4,0x5e,0x6c,0x61,0x29 ,0x6c,0xdc,0x3c,0x01,0x23,0x35,0x61,0xf4 ,0x1d,0xb6,0x6c,0xce,0x31,0x4a,0xdb,0x31 ,0x0e,0x3b,0xe8,0x25,0x0c,0x46,0xf0,0x6d ,0xce,0xea,0x3a,0x7f,0xa1,0x34,0x80,0x57 ,0xe2,0xf6,0x55,0x6a,0xd6,0xb1,0x31,0x8a ,0x02,0x4a,0x83,0x8f,0x21,0xaf,0x1f,0xde ,0x04,0x89,0x77,0xeb,0x48,0xf5,0x9f,0xfd ,0x49,0x24,0xca,0x1c,0x60,0x90,0x2e,0x52 ,0xf0,0xa0,0x89,0xbc,0x76,0x89,0x70,0x40 ,0xe0,0x82,0xf9,0x37,0x76,0x38,0x48,0x64 ,0x5e,0x07,0x05 } ; main() { int i; string m(m_bytes,sizeof m_bytes); string nonce(nonce_bytes,sizeof nonce_bytes); string bobpk(bobpk_bytes,sizeof bobpk_bytes); string alicesk(alicesk_bytes,sizeof alicesk_bytes); string c = crypto_box_curve25519xsalsa20poly1305(m,nonce,bobpk,alicesk); for (i = 0;i < c.size();++i) { printf(",0x%02x",(unsigned int) (unsigned char) c[i]); if (i % 8 == 7) printf("\n"); } printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/box3.out000066400000000000000000000013621150631715100206150ustar00rootroot00000000000000,0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 ,0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 curvedns-curvedns-0.87/nacl/tests/box4.cpp000066400000000000000000000036351150631715100205760ustar00rootroot00000000000000#include using std::string; #include #include "crypto_box_curve25519xsalsa20poly1305.h" char bobsk_bytes[32] = { 0x5d,0xab,0x08,0x7e,0x62,0x4a,0x8a,0x4b ,0x79,0xe1,0x7f,0x8b,0x83,0x80,0x0e,0xe6 ,0x6f,0x3b,0xb1,0x29,0x26,0x18,0xb6,0xfd ,0x1c,0x2f,0x8b,0x27,0xff,0x88,0xe0,0xeb } ; char alicepk_bytes[32] = { 0x85,0x20,0xf0,0x09,0x89,0x30,0xa7,0x54 ,0x74,0x8b,0x7d,0xdc,0xb4,0x3e,0xf7,0x5a ,0x0d,0xbf,0x3a,0x0d,0x26,0x38,0x1a,0xf4 ,0xeb,0xa4,0xa9,0x8e,0xaa,0x9b,0x4e,0x6a } ; char nonce_bytes[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; char c_bytes[147] = { 0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 ,0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a 
,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 } ; main() { int i; string c(c_bytes,sizeof c_bytes); string nonce(nonce_bytes,sizeof nonce_bytes); string alicepk(alicepk_bytes,sizeof alicepk_bytes); string bobsk(bobsk_bytes,sizeof bobsk_bytes); try { string m = crypto_box_curve25519xsalsa20poly1305_open(c,nonce,alicepk,bobsk); for (i = 0;i < m.size();++i) { printf(",0x%02x",(unsigned int) (unsigned char) m[i]); if (i % 8 == 7) printf("\n"); } printf("\n"); } catch(const char *s) { printf("%s\n",s); } return 0; } curvedns-curvedns-0.87/nacl/tests/box4.out000066400000000000000000000012401150631715100206110ustar00rootroot00000000000000,0xbe,0x07,0x5f,0xc5,0x3c,0x81,0xf2,0xd5 ,0xcf,0x14,0x13,0x16,0xeb,0xeb,0x0c,0x7b ,0x52,0x28,0xc5,0x2a,0x4c,0x62,0xcb,0xd4 ,0x4b,0x66,0x84,0x9b,0x64,0x24,0x4f,0xfc ,0xe5,0xec,0xba,0xaf,0x33,0xbd,0x75,0x1a ,0x1a,0xc7,0x28,0xd4,0x5e,0x6c,0x61,0x29 ,0x6c,0xdc,0x3c,0x01,0x23,0x35,0x61,0xf4 ,0x1d,0xb6,0x6c,0xce,0x31,0x4a,0xdb,0x31 ,0x0e,0x3b,0xe8,0x25,0x0c,0x46,0xf0,0x6d ,0xce,0xea,0x3a,0x7f,0xa1,0x34,0x80,0x57 ,0xe2,0xf6,0x55,0x6a,0xd6,0xb1,0x31,0x8a ,0x02,0x4a,0x83,0x8f,0x21,0xaf,0x1f,0xde ,0x04,0x89,0x77,0xeb,0x48,0xf5,0x9f,0xfd ,0x49,0x24,0xca,0x1c,0x60,0x90,0x2e,0x52 ,0xf0,0xa0,0x89,0xbc,0x76,0x89,0x70,0x40 ,0xe0,0x82,0xf9,0x37,0x76,0x38,0x48,0x64 ,0x5e,0x07,0x05 curvedns-curvedns-0.87/nacl/tests/box5.cpp000066400000000000000000000014161150631715100205720ustar00rootroot00000000000000#include using std::string; #include #include "crypto_box.h" #include "randombytes.h" main() { int mlen; for (mlen = 0;mlen < 1000;++mlen) { string alicesk; string alicepk = crypto_box_keypair(&alicesk); string bobsk; string bobpk = crypto_box_keypair(&bobsk); unsigned char nbytes[crypto_box_NONCEBYTES]; randombytes(nbytes,crypto_box_NONCEBYTES); string n((char *) nbytes,crypto_box_NONCEBYTES); unsigned char mbytes[mlen]; randombytes(mbytes,mlen); string m((char *) mbytes,mlen); string c = crypto_box(m,n,bobpk,alicesk); try { string m2 = crypto_box_open(c,n,alicepk,bobsk); if (m != m2) printf("bad decryption\n"); } catch(const char *s) { printf("%s\n",s); } } return 0; } curvedns-curvedns-0.87/nacl/tests/box5.out000066400000000000000000000000001150631715100206030ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/box6.cpp000066400000000000000000000020201150631715100205630ustar00rootroot00000000000000#include using std::string; #include #include #include "crypto_box.h" #include "randombytes.h" main() { int mlen; for (mlen = 0;mlen < 1000;++mlen) { string alicesk; string alicepk = crypto_box_keypair(&alicesk); string bobsk; string bobpk = crypto_box_keypair(&bobsk); unsigned char nbytes[crypto_box_NONCEBYTES]; randombytes(nbytes,crypto_box_NONCEBYTES); string n((char *) nbytes,crypto_box_NONCEBYTES); unsigned char mbytes[mlen]; randombytes(mbytes,mlen); string m((char *) mbytes,mlen); string c = crypto_box(m,n,bobpk,alicesk); int caught = 0; while (caught < 10) { c.replace(random() % c.size(),1,1,random()); try { string m2 = crypto_box_open(c,n,alicepk,bobsk); if (m != m2) { printf("forgery\n"); return 100; } } catch(const char *s) { if (string(s) == string("ciphertext fails verification")) ++caught; else { printf("%s\n",s); return 111; } } } } return 0; } 
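The crypto_box tests above repeatedly note the C API's padding convention: the plaintext buffer must begin with crypto_box_ZEROBYTES zero bytes, and the ciphertext buffer comes back with crypto_box_BOXZEROBYTES leading zeros. A minimal encrypt-then-decrypt sketch of that calling convention, assuming the standard crypto_box.h and randombytes.h interfaces used by box7.c below; the payload and its length are illustrative only. A nonce must never be reused for the same key pair, which is why the sketch draws a fresh one per message.

/* Sketch: crypto_box round trip, showing the zero-padding convention
 * exercised by the tests above. */
#include <stdio.h>
#include <string.h>
#include "crypto_box.h"
#include "randombytes.h"

int main(void)
{
  unsigned char alicepk[crypto_box_PUBLICKEYBYTES], alicesk[crypto_box_SECRETKEYBYTES];
  unsigned char bobpk[crypto_box_PUBLICKEYBYTES], bobsk[crypto_box_SECRETKEYBYTES];
  unsigned char n[crypto_box_NONCEBYTES];
  unsigned char m[crypto_box_ZEROBYTES + 11];    /* 32 zero bytes + payload */
  unsigned char c[sizeof m];
  unsigned char m2[sizeof m];

  crypto_box_keypair(alicepk, alicesk);
  crypto_box_keypair(bobpk, bobsk);
  randombytes(n, sizeof n);                      /* fresh nonce per message */

  memset(m, 0, crypto_box_ZEROBYTES);            /* required leading zeros */
  memcpy(m + crypto_box_ZEROBYTES, "hello bob!!", 11);

  crypto_box(c, m, sizeof m, n, bobpk, alicesk); /* c[0..15] come back zero */
  if (crypto_box_open(m2, c, sizeof c, n, alicepk, bobsk) != 0) {
    printf("ciphertext fails verification\n");
    return 1;
  }
  printf("%.11s\n", (char *) (m2 + crypto_box_ZEROBYTES));
  return 0;
}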
curvedns-curvedns-0.87/nacl/tests/box6.out000066400000000000000000000000001150631715100206040ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/box7.c000066400000000000000000000017721150631715100202410ustar00rootroot00000000000000#include #include "crypto_box.h" #include "randombytes.h" unsigned char alicesk[crypto_box_SECRETKEYBYTES]; unsigned char alicepk[crypto_box_PUBLICKEYBYTES]; unsigned char bobsk[crypto_box_SECRETKEYBYTES]; unsigned char bobpk[crypto_box_PUBLICKEYBYTES]; unsigned char n[crypto_box_NONCEBYTES]; unsigned char m[10000]; unsigned char c[10000]; unsigned char m2[10000]; main() { int mlen; int i; for (mlen = 0;mlen < 1000 && mlen + crypto_box_ZEROBYTES < sizeof m;++mlen) { crypto_box_keypair(alicepk,alicesk); crypto_box_keypair(bobpk,bobsk); randombytes(n,crypto_box_NONCEBYTES); randombytes(m + crypto_box_ZEROBYTES,mlen); crypto_box(c,m,mlen + crypto_box_ZEROBYTES,n,bobpk,alicesk); if (crypto_box_open(m2,c,mlen + crypto_box_ZEROBYTES,n,alicepk,bobsk) == 0) { for (i = 0;i < mlen + crypto_box_ZEROBYTES;++i) if (m2[i] != m[i]) { printf("bad decryption\n"); break; } } else { printf("ciphertext fails verification\n"); } } return 0; } curvedns-curvedns-0.87/nacl/tests/box7.out000066400000000000000000000000001150631715100206050ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/box8.c000066400000000000000000000021451150631715100202350ustar00rootroot00000000000000#include #include "crypto_box.h" #include "randombytes.h" unsigned char alicesk[crypto_box_SECRETKEYBYTES]; unsigned char alicepk[crypto_box_PUBLICKEYBYTES]; unsigned char bobsk[crypto_box_SECRETKEYBYTES]; unsigned char bobpk[crypto_box_PUBLICKEYBYTES]; unsigned char n[crypto_box_NONCEBYTES]; unsigned char m[10000]; unsigned char c[10000]; unsigned char m2[10000]; main() { int mlen; int i; int caught; for (mlen = 0;mlen < 1000 && mlen + crypto_box_ZEROBYTES < sizeof m;++mlen) { crypto_box_keypair(alicepk,alicesk); crypto_box_keypair(bobpk,bobsk); randombytes(n,crypto_box_NONCEBYTES); randombytes(m + crypto_box_ZEROBYTES,mlen); crypto_box(c,m,mlen + crypto_box_ZEROBYTES,n,bobpk,alicesk); caught = 0; while (caught < 10) { c[random() % (mlen + crypto_box_ZEROBYTES)] = random(); if (crypto_box_open(m2,c,mlen + crypto_box_ZEROBYTES,n,alicepk,bobsk) == 0) { for (i = 0;i < mlen + crypto_box_ZEROBYTES;++i) if (m2[i] != m[i]) { printf("forgery\n"); return 100; } } else { ++caught; } } } return 0; } curvedns-curvedns-0.87/nacl/tests/box8.out000066400000000000000000000000001150631715100206060ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/core1.c000066400000000000000000000012311150631715100203610ustar00rootroot00000000000000#include #include "crypto_core_hsalsa20.h" unsigned char shared[32] = { 0x4a,0x5d,0x9d,0x5b,0xa4,0xce,0x2d,0xe1 ,0x72,0x8e,0x3b,0xf4,0x80,0x35,0x0f,0x25 ,0xe0,0x7e,0x21,0xc9,0x47,0xd1,0x9e,0x33 ,0x76,0xf0,0x9b,0x3c,0x1e,0x16,0x17,0x42 } ; unsigned char zero[32] = { 0 }; unsigned char c[16] = { 0x65,0x78,0x70,0x61,0x6e,0x64,0x20,0x33 ,0x32,0x2d,0x62,0x79,0x74,0x65,0x20,0x6b } ; unsigned char firstkey[32]; main() { int i; crypto_core_hsalsa20(firstkey,zero,shared,c); for (i = 0;i < 32;++i) { if (i > 0) printf(","); else printf(" "); printf("0x%02x",(unsigned int) firstkey[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/core1.out000066400000000000000000000002441150631715100207510ustar00rootroot00000000000000 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 
,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 curvedns-curvedns-0.87/nacl/tests/core2.c000066400000000000000000000013771150631715100203750ustar00rootroot00000000000000#include #include "crypto_core_hsalsa20.h" unsigned char firstkey[32] = { 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 ,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 } ; unsigned char nonceprefix[16] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 } ; unsigned char c[16] = { 0x65,0x78,0x70,0x61,0x6e,0x64,0x20,0x33 ,0x32,0x2d,0x62,0x79,0x74,0x65,0x20,0x6b } ; unsigned char secondkey[32]; main() { int i; crypto_core_hsalsa20(secondkey,nonceprefix,firstkey,c); for (i = 0;i < 32;++i) { if (i > 0) printf(","); else printf(" "); printf("0x%02x",(unsigned int) secondkey[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/core2.out000066400000000000000000000002441150631715100207520ustar00rootroot00000000000000 0xdc,0x90,0x8d,0xda,0x0b,0x93,0x44,0xa9 ,0x53,0x62,0x9b,0x73,0x38,0x20,0x77,0x88 ,0x80,0xf3,0xce,0xb4,0x21,0xbb,0x61,0xb9 ,0x1c,0xbd,0x4c,0x3e,0x66,0x25,0x6c,0xe4 curvedns-curvedns-0.87/nacl/tests/core3.c000066400000000000000000000016151150631715100203710ustar00rootroot00000000000000#include #include "crypto_core_salsa20.h" #include "crypto_hash_sha256.h" unsigned char secondkey[32] = { 0xdc,0x90,0x8d,0xda,0x0b,0x93,0x44,0xa9 ,0x53,0x62,0x9b,0x73,0x38,0x20,0x77,0x88 ,0x80,0xf3,0xce,0xb4,0x21,0xbb,0x61,0xb9 ,0x1c,0xbd,0x4c,0x3e,0x66,0x25,0x6c,0xe4 } ; unsigned char noncesuffix[8] = { 0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; unsigned char c[16] = { 0x65,0x78,0x70,0x61,0x6e,0x64,0x20,0x33 ,0x32,0x2d,0x62,0x79,0x74,0x65,0x20,0x6b } ; unsigned char in[16] = { 0 } ; unsigned char output[64 * 256 * 256]; unsigned char h[32]; main() { int i; long long pos = 0; for (i = 0;i < 8;++i) in[i] = noncesuffix[i]; do { do { crypto_core_salsa20(output + pos,in,secondkey,c); pos += 64; } while (++in[8]); } while (++in[9]); crypto_hash_sha256(h,output,sizeof output); for (i = 0;i < 32;++i) printf("%02x",h[i]); printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/core3.out000066400000000000000000000001011150631715100207430ustar00rootroot00000000000000662b9d0e3463029156069b12f918691a98f7dfb2ca0393c96bbfc6b1fbd630a2 curvedns-curvedns-0.87/nacl/tests/core4.c000066400000000000000000000012101150631715100203610ustar00rootroot00000000000000#include #include "crypto_core_salsa20.h" unsigned char k[32] = { 1, 2, 3, 4, 5, 6, 7, 8 , 9, 10, 11, 12, 13, 14, 15, 16 ,201,202,203,204,205,206,207,208 ,209,210,211,212,213,214,215,216 } ; unsigned char in[16] = { 101,102,103,104,105,106,107,108 ,109,110,111,112,113,114,115,116 } ; unsigned char c[16] = { 101,120,112, 97,110,100, 32, 51 , 50, 45, 98,121,116,101, 32,107 } ; unsigned char out[64]; main() { int i; crypto_core_salsa20(out,in,k,c); for (i = 0;i < 64;++i) { if (i > 0) printf(","); else printf(" "); printf("%3d",(unsigned int) out[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/core4.out000066400000000000000000000004101150631715100207470ustar00rootroot00000000000000 69, 37, 68, 39, 41, 15,107,193 ,255,139,122, 6,170,233,217, 98 , 89,144,182,106, 21, 51,200, 65 ,239, 49,222, 34,215,114, 40,126 ,104,197, 7,225,197,153, 31, 2 ,102, 78, 76,176, 84,245,246,184 ,177,160,133,130, 6, 72,149,119 ,192,195,132,236,234,103,246, 74 
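core1.c through core4.c above pin down the HSalsa20 and Salsa20 building blocks in isolation; the xsalsa20/ref/stream.c earlier in the archive combines them into the XSalsa20 stream cipher. A minimal sketch of that combination — derive a subkey from the first 16 nonce bytes with HSalsa20, then run Salsa20 under the remaining 8 nonce bytes — assuming the crypto_core_hsalsa20.h and crypto_stream_salsa20.h headers from this tree; the key and nonce values are placeholders.

/* Sketch: XSalsa20 as HSalsa20 key derivation + Salsa20 stream,
 * mirroring nacl/crypto_stream/xsalsa20/ref/stream.c above. */
#include <stdio.h>
#include "crypto_core_hsalsa20.h"
#include "crypto_stream_salsa20.h"

static const unsigned char sigma[16] = "expand 32-byte k";

int main(void)
{
  unsigned char k[32] = {0};   /* placeholder 32-byte key */
  unsigned char n[24] = {0};   /* placeholder 24-byte XSalsa20 nonce */
  unsigned char subkey[32];
  unsigned char stream[64];
  int i;

  crypto_core_hsalsa20(subkey, n, k, sigma);                    /* first 16 nonce bytes */
  crypto_stream_salsa20(stream, sizeof stream, n + 16, subkey); /* last 8 nonce bytes */

  for (i = 0; i < 16; ++i) printf("%02x", stream[i]);           /* prefix of first block */
  printf("\n");
  return 0;
}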
curvedns-curvedns-0.87/nacl/tests/core5.c000066400000000000000000000012201150631715100203630ustar00rootroot00000000000000#include #include "crypto_core_hsalsa20.h" unsigned char k[32] = { 0xee,0x30,0x4f,0xca,0x27,0x00,0x8d,0x8c ,0x12,0x6f,0x90,0x02,0x79,0x01,0xd8,0x0f ,0x7f,0x1d,0x8b,0x8d,0xc9,0x36,0xcf,0x3b ,0x9f,0x81,0x96,0x92,0x82,0x7e,0x57,0x77 } ; unsigned char in[16] = { 0x81,0x91,0x8e,0xf2,0xa5,0xe0,0xda,0x9b ,0x3e,0x90,0x60,0x52,0x1e,0x4b,0xb3,0x52 } ; unsigned char c[16] = { 101,120,112, 97,110,100, 32, 51 , 50, 45, 98,121,116,101, 32,107 } ; unsigned char out[32]; main() { int i; crypto_core_hsalsa20(out,in,k,c); for (i = 0;i < 32;++i) { printf(",0x%02x",(unsigned int) out[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/core5.out000066400000000000000000000002441150631715100207550ustar00rootroot00000000000000,0xbc,0x1b,0x30,0xfc,0x07,0x2c,0xc1,0x40 ,0x75,0xe4,0xba,0xa7,0x31,0xb5,0xa8,0x45 ,0xea,0x9b,0x11,0xe9,0xa5,0x19,0x1f,0x94 ,0xe1,0x8c,0xba,0x8f,0xd8,0x21,0xa7,0xcd curvedns-curvedns-0.87/nacl/tests/core6.c000066400000000000000000000020161150631715100203700ustar00rootroot00000000000000#include #include "crypto_core_salsa20.h" unsigned char k[32] = { 0xee,0x30,0x4f,0xca,0x27,0x00,0x8d,0x8c ,0x12,0x6f,0x90,0x02,0x79,0x01,0xd8,0x0f ,0x7f,0x1d,0x8b,0x8d,0xc9,0x36,0xcf,0x3b ,0x9f,0x81,0x96,0x92,0x82,0x7e,0x57,0x77 } ; unsigned char in[16] = { 0x81,0x91,0x8e,0xf2,0xa5,0xe0,0xda,0x9b ,0x3e,0x90,0x60,0x52,0x1e,0x4b,0xb3,0x52 } ; unsigned char c[16] = { 101,120,112, 97,110,100, 32, 51 , 50, 45, 98,121,116,101, 32,107 } ; unsigned char out[64]; void print(unsigned char *x,unsigned char *y) { int i; unsigned int borrow = 0; for (i = 0;i < 4;++i) { unsigned int xi = x[i]; unsigned int yi = y[i]; printf(",0x%02x",255 & (xi - yi - borrow)); borrow = (xi < yi + borrow); } } main() { crypto_core_salsa20(out,in,k,c); print(out,c); print(out + 20,c + 4); printf("\n"); print(out + 40,c + 8); print(out + 60,c + 12); printf("\n"); print(out + 24,in); print(out + 28,in + 4); printf("\n"); print(out + 32,in + 8); print(out + 36,in + 12); printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/core6.out000066400000000000000000000002441150631715100207560ustar00rootroot00000000000000,0xbc,0x1b,0x30,0xfc,0x07,0x2c,0xc1,0x40 ,0x75,0xe4,0xba,0xa7,0x31,0xb5,0xa8,0x45 ,0xea,0x9b,0x11,0xe9,0xa5,0x19,0x1f,0x94 ,0xe1,0x8c,0xba,0x8f,0xd8,0x21,0xa7,0xcd curvedns-curvedns-0.87/nacl/tests/hash.c000066400000000000000000000004211150631715100202730ustar00rootroot00000000000000#include #include "crypto_hash.h" unsigned char x[8] = "testing\n"; unsigned char h[crypto_hash_BYTES]; int main() { int i; crypto_hash(h,x,sizeof x); for (i = 0;i < crypto_hash_BYTES;++i) printf("%02x",(unsigned int) h[i]); printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/hash.out000066400000000000000000000002011150631715100206540ustar00rootroot0000000000000024f950aac7b9ea9b3cb728228a0c82b67c39e96b4b344798870d5daee93e3ae5931baae8c7cacfea4b629452c38026a81d138bc7aad1af3ef7bfd5ec646d6c28 curvedns-curvedns-0.87/nacl/tests/hash2.cpp000066400000000000000000000005221150631715100207170ustar00rootroot00000000000000#include #include using std::string; using std::cout; using std::hex; #include "crypto_hash.h" int main() { string x = "testing\n"; string h = crypto_hash(x); for (int i = 0;i < h.size();++i) { cout << hex << (15 & (int) (h[i] >> 4)); cout << hex << (15 & (int) h[i]); } cout << "\n"; return 0; } 
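The hash tests above produce identical digests for "testing\n" whether they call the generic crypto_hash or crypto_hash_sha512, which is what selects SHA-512 as the default. A minimal sketch that checks this programmatically, assuming both headers and the crypto_hash_BYTES / crypto_hash_sha512_BYTES constants from the standard NaCl API.

/* Sketch: confirm that crypto_hash selects SHA-512 here, as the identical
 * hash.out and hash3.out above indicate. */
#include <stdio.h>
#include <string.h>
#include "crypto_hash.h"
#include "crypto_hash_sha512.h"

int main(void)
{
  unsigned char x[8] = "testing\n";
  unsigned char h1[crypto_hash_BYTES];
  unsigned char h2[crypto_hash_sha512_BYTES];

  crypto_hash(h1, x, sizeof x);
  crypto_hash_sha512(h2, x, sizeof x);

  if (crypto_hash_BYTES == crypto_hash_sha512_BYTES
      && memcmp(h1, h2, crypto_hash_BYTES) == 0)
    printf("crypto_hash is SHA-512 here\n");
  else
    printf("crypto_hash is not SHA-512\n");
  return 0;
}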
curvedns-curvedns-0.87/nacl/tests/hash2.out000066400000000000000000000002011150631715100207360ustar00rootroot0000000000000024f950aac7b9ea9b3cb728228a0c82b67c39e96b4b344798870d5daee93e3ae5931baae8c7cacfea4b629452c38026a81d138bc7aad1af3ef7bfd5ec646d6c28 curvedns-curvedns-0.87/nacl/tests/hash3.c000066400000000000000000000004551150631715100203650ustar00rootroot00000000000000#include #include "crypto_hash_sha512.h" unsigned char x[8] = "testing\n"; unsigned char h[crypto_hash_sha512_BYTES]; int main() { int i; crypto_hash_sha512(h,x,sizeof x); for (i = 0;i < crypto_hash_sha512_BYTES;++i) printf("%02x",(unsigned int) h[i]); printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/hash3.out000066400000000000000000000002011150631715100207370ustar00rootroot0000000000000024f950aac7b9ea9b3cb728228a0c82b67c39e96b4b344798870d5daee93e3ae5931baae8c7cacfea4b629452c38026a81d138bc7aad1af3ef7bfd5ec646d6c28 curvedns-curvedns-0.87/nacl/tests/hash4.cpp000066400000000000000000000005401150631715100207210ustar00rootroot00000000000000#include #include using std::string; using std::cout; using std::hex; #include "crypto_hash_sha512.h" int main() { string x = "testing\n"; string h = crypto_hash_sha512(x); for (int i = 0;i < h.size();++i) { cout << hex << (15 & (int) (h[i] >> 4)); cout << hex << (15 & (int) h[i]); } cout << "\n"; return 0; } curvedns-curvedns-0.87/nacl/tests/hash4.out000066400000000000000000000002011150631715100207400ustar00rootroot0000000000000024f950aac7b9ea9b3cb728228a0c82b67c39e96b4b344798870d5daee93e3ae5931baae8c7cacfea4b629452c38026a81d138bc7aad1af3ef7bfd5ec646d6c28 curvedns-curvedns-0.87/nacl/tests/onetimeauth.c000066400000000000000000000022121150631715100216720ustar00rootroot00000000000000#include #include "crypto_onetimeauth_poly1305.h" unsigned char rs[32] = { 0xee,0xa6,0xa7,0x25,0x1c,0x1e,0x72,0x91 ,0x6d,0x11,0xc2,0xcb,0x21,0x4d,0x3c,0x25 ,0x25,0x39,0x12,0x1d,0x8e,0x23,0x4e,0x65 ,0x2d,0x65,0x1f,0xa4,0xc8,0xcf,0xf8,0x80 } ; unsigned char c[131] = { 0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 } ; unsigned char a[16]; main() { int i; crypto_onetimeauth_poly1305(a,c,131,rs); for (i = 0;i < 16;++i) { printf(",0x%02x",(unsigned int) a[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/onetimeauth.out000066400000000000000000000001221150631715100222550ustar00rootroot00000000000000,0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 curvedns-curvedns-0.87/nacl/tests/onetimeauth2.c000066400000000000000000000022041150631715100217550ustar00rootroot00000000000000#include #include "crypto_onetimeauth_poly1305.h" unsigned char rs[32] = { 0xee,0xa6,0xa7,0x25,0x1c,0x1e,0x72,0x91 ,0x6d,0x11,0xc2,0xcb,0x21,0x4d,0x3c,0x25 ,0x25,0x39,0x12,0x1d,0x8e,0x23,0x4e,0x65 ,0x2d,0x65,0x1f,0xa4,0xc8,0xcf,0xf8,0x80 } ; unsigned char c[131] = { 0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce 
,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 } ; unsigned char a[16] = { 0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 } ; main() { printf("%d\n",crypto_onetimeauth_poly1305_verify(a,c,131,rs)); return 0; } curvedns-curvedns-0.87/nacl/tests/onetimeauth2.out000066400000000000000000000000021150631715100223340ustar00rootroot000000000000000 curvedns-curvedns-0.87/nacl/tests/onetimeauth5.cpp000066400000000000000000000024171150631715100223260ustar00rootroot00000000000000#include using std::string; #include #include "crypto_onetimeauth_poly1305.h" char rs_bytes[32] = { 0xee,0xa6,0xa7,0x25,0x1c,0x1e,0x72,0x91 ,0x6d,0x11,0xc2,0xcb,0x21,0x4d,0x3c,0x25 ,0x25,0x39,0x12,0x1d,0x8e,0x23,0x4e,0x65 ,0x2d,0x65,0x1f,0xa4,0xc8,0xcf,0xf8,0x80 } ; char c_bytes[131] = { 0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 } ; unsigned char a[16]; main() { int i; string c(c_bytes,sizeof c_bytes); string rs(rs_bytes,sizeof rs_bytes); string a = crypto_onetimeauth_poly1305(c,rs); for (i = 0;i < a.size();++i) { printf(",0x%02x",(unsigned int) (unsigned char) a[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/onetimeauth5.out000066400000000000000000000001221150631715100223420ustar00rootroot00000000000000,0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 curvedns-curvedns-0.87/nacl/tests/onetimeauth6.cpp000066400000000000000000000025161150631715100223270ustar00rootroot00000000000000#include using std::string; #include #include "crypto_onetimeauth_poly1305.h" char rs_bytes[32] = { 0xee,0xa6,0xa7,0x25,0x1c,0x1e,0x72,0x91 ,0x6d,0x11,0xc2,0xcb,0x21,0x4d,0x3c,0x25 ,0x25,0x39,0x12,0x1d,0x8e,0x23,0x4e,0x65 ,0x2d,0x65,0x1f,0xa4,0xc8,0xcf,0xf8,0x80 } ; char c_bytes[131] = { 0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 
,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 } ; char a_bytes[16] = { 0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 } ; main() { string rs(rs_bytes,sizeof rs_bytes); string c(c_bytes,sizeof c_bytes); string a(a_bytes,sizeof a_bytes); try { crypto_onetimeauth_poly1305_verify(a,c,rs); printf("0\n"); } catch(const char *s) { printf("%s\n",s); } return 0; } curvedns-curvedns-0.87/nacl/tests/onetimeauth6.out000066400000000000000000000000021150631715100223400ustar00rootroot000000000000000 curvedns-curvedns-0.87/nacl/tests/onetimeauth7.c000066400000000000000000000015621150631715100217700ustar00rootroot00000000000000#include #include #include "crypto_onetimeauth_poly1305.h" #include "randombytes.h" unsigned char key[32]; unsigned char c[10000]; unsigned char a[16]; main() { int clen; int i; for (clen = 0;clen < 10000;++clen) { randombytes(key,sizeof key); randombytes(c,clen); crypto_onetimeauth_poly1305(a,c,clen,key); if (crypto_onetimeauth_poly1305_verify(a,c,clen,key) != 0) { printf("fail %d\n",clen); return 100; } if (clen > 0) { c[random() % clen] += 1 + (random() % 255); if (crypto_onetimeauth_poly1305_verify(a,c,clen,key) == 0) { printf("forgery %d\n",clen); return 100; } a[random() % sizeof a] += 1 + (random() % 255); if (crypto_onetimeauth_poly1305_verify(a,c,clen,key) == 0) { printf("forgery %d\n",clen); return 100; } } } return 0; } curvedns-curvedns-0.87/nacl/tests/onetimeauth7.out000066400000000000000000000000001150631715100223370ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/onetimeauth8.cpp000066400000000000000000000022331150631715100223250ustar00rootroot00000000000000#include using std::string; #include #include #include "crypto_onetimeauth_poly1305.h" #include "randombytes.h" main() { int clen; int i; for (clen = 0;clen < 10000;++clen) { unsigned char key_bytes[32]; randombytes(key_bytes,sizeof key_bytes); string key((char *) key_bytes,sizeof key_bytes); unsigned char c_bytes[clen]; randombytes(c_bytes,sizeof c_bytes); string c((char *) c_bytes,sizeof c_bytes); string a = crypto_onetimeauth_poly1305(c,key); try { crypto_onetimeauth_poly1305_verify(a,c,key); } catch(const char *s) { printf("fail %d %s\n",clen,s); return 100; } if (clen > 0) { size_t pos = random() % clen; c.replace(pos,1,1,c[pos] + 1 + (random() % 255)); try { crypto_onetimeauth_poly1305_verify(a,c,key); printf("forgery %d\n",clen); } catch(const char *s) { ; } pos = random() % a.size(); a.replace(pos,1,1,a[pos] + 1 + (random() % 255)); try { crypto_onetimeauth_poly1305_verify(a,c,key); printf("forgery %d\n",clen); } catch(const char *s) { ; } } } return 0; } curvedns-curvedns-0.87/nacl/tests/onetimeauth8.out000066400000000000000000000000001150631715100223400ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/scalarmult.c000066400000000000000000000010261150631715100215210ustar00rootroot00000000000000#include #include "crypto_scalarmult_curve25519.h" unsigned char alicesk[32] = { 0x77,0x07,0x6d,0x0a,0x73,0x18,0xa5,0x7d ,0x3c,0x16,0xc1,0x72,0x51,0xb2,0x66,0x45 ,0xdf,0x4c,0x2f,0x87,0xeb,0xc0,0x99,0x2a ,0xb1,0x77,0xfb,0xa5,0x1d,0xb9,0x2c,0x2a } ; unsigned char alicepk[32]; main() { int i; crypto_scalarmult_curve25519_base(alicepk,alicesk); for (i = 0;i < 32;++i) { if (i > 0) printf(","); else printf(" "); printf("0x%02x",(unsigned int) alicepk[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/scalarmult.out000066400000000000000000000002441150631715100221070ustar00rootroot00000000000000 
0x85,0x20,0xf0,0x09,0x89,0x30,0xa7,0x54 ,0x74,0x8b,0x7d,0xdc,0xb4,0x3e,0xf7,0x5a ,0x0d,0xbf,0x3a,0x0d,0x26,0x38,0x1a,0xf4 ,0xeb,0xa4,0xa9,0x8e,0xaa,0x9b,0x4e,0x6a curvedns-curvedns-0.87/nacl/tests/scalarmult2.c000066400000000000000000000010141150631715100216000ustar00rootroot00000000000000#include #include "crypto_scalarmult_curve25519.h" unsigned char bobsk[32] = { 0x5d,0xab,0x08,0x7e,0x62,0x4a,0x8a,0x4b ,0x79,0xe1,0x7f,0x8b,0x83,0x80,0x0e,0xe6 ,0x6f,0x3b,0xb1,0x29,0x26,0x18,0xb6,0xfd ,0x1c,0x2f,0x8b,0x27,0xff,0x88,0xe0,0xeb } ; unsigned char bobpk[32]; main() { int i; crypto_scalarmult_curve25519_base(bobpk,bobsk); for (i = 0;i < 32;++i) { if (i > 0) printf(","); else printf(" "); printf("0x%02x",(unsigned int) bobpk[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/scalarmult2.out000066400000000000000000000002441150631715100221710ustar00rootroot00000000000000 0xde,0x9e,0xdb,0x7d,0x7b,0x7d,0xc1,0xb4 ,0xd3,0x5b,0x61,0xc2,0xec,0xe4,0x35,0x37 ,0x3f,0x83,0x43,0xc8,0x5b,0x78,0x67,0x4d ,0xad,0xfc,0x7e,0x14,0x6f,0x88,0x2b,0x4f curvedns-curvedns-0.87/nacl/tests/scalarmult3.cpp000066400000000000000000000014031150631715100221430ustar00rootroot00000000000000#include #include #include using std::string; using std::cout; using std::setfill; using std::setw; using std::hex; #include "crypto_scalarmult_curve25519.h" char alicesk_bytes[32] = { 0x77,0x07,0x6d,0x0a,0x73,0x18,0xa5,0x7d ,0x3c,0x16,0xc1,0x72,0x51,0xb2,0x66,0x45 ,0xdf,0x4c,0x2f,0x87,0xeb,0xc0,0x99,0x2a ,0xb1,0x77,0xfb,0xa5,0x1d,0xb9,0x2c,0x2a } ; main() { int i; cout << setfill('0'); string alicesk(alicesk_bytes,sizeof alicesk_bytes); string alicepk = crypto_scalarmult_curve25519_base(alicesk); for (i = 0;i < alicepk.size();++i) { unsigned char c = alicepk[i]; if (i > 0) cout << ","; else cout << " "; cout << "0x" << hex << setw(2) << (unsigned int) c; if (i % 8 == 7) cout << "\n"; } return 0; } curvedns-curvedns-0.87/nacl/tests/scalarmult3.out000066400000000000000000000002441150631715100221720ustar00rootroot00000000000000 0x85,0x20,0xf0,0x09,0x89,0x30,0xa7,0x54 ,0x74,0x8b,0x7d,0xdc,0xb4,0x3e,0xf7,0x5a ,0x0d,0xbf,0x3a,0x0d,0x26,0x38,0x1a,0xf4 ,0xeb,0xa4,0xa9,0x8e,0xaa,0x9b,0x4e,0x6a curvedns-curvedns-0.87/nacl/tests/scalarmult4.cpp000066400000000000000000000013631150631715100221510ustar00rootroot00000000000000#include #include #include using std::string; using std::cout; using std::setfill; using std::setw; using std::hex; #include "crypto_scalarmult_curve25519.h" char bobsk_bytes[32] = { 0x5d,0xab,0x08,0x7e,0x62,0x4a,0x8a,0x4b ,0x79,0xe1,0x7f,0x8b,0x83,0x80,0x0e,0xe6 ,0x6f,0x3b,0xb1,0x29,0x26,0x18,0xb6,0xfd ,0x1c,0x2f,0x8b,0x27,0xff,0x88,0xe0,0xeb } ; main() { int i; cout << setfill('0'); string bobsk(bobsk_bytes,sizeof bobsk_bytes); string bobpk = crypto_scalarmult_curve25519_base(bobsk); for (i = 0;i < bobpk.size();++i) { unsigned char c = bobpk[i]; if (i > 0) cout << ","; else cout << " "; cout << "0x" << hex << setw(2) << (unsigned int) c; if (i % 8 == 7) cout << "\n"; } return 0; } curvedns-curvedns-0.87/nacl/tests/scalarmult4.out000066400000000000000000000002441150631715100221730ustar00rootroot00000000000000 0xde,0x9e,0xdb,0x7d,0x7b,0x7d,0xc1,0xb4 ,0xd3,0x5b,0x61,0xc2,0xec,0xe4,0x35,0x37 ,0x3f,0x83,0x43,0xc8,0x5b,0x78,0x67,0x4d ,0xad,0xfc,0x7e,0x14,0x6f,0x88,0x2b,0x4f curvedns-curvedns-0.87/nacl/tests/scalarmult5.c000066400000000000000000000013121150631715100216040ustar00rootroot00000000000000#include #include "crypto_scalarmult_curve25519.h" unsigned char alicesk[32] = { 
0x77,0x07,0x6d,0x0a,0x73,0x18,0xa5,0x7d ,0x3c,0x16,0xc1,0x72,0x51,0xb2,0x66,0x45 ,0xdf,0x4c,0x2f,0x87,0xeb,0xc0,0x99,0x2a ,0xb1,0x77,0xfb,0xa5,0x1d,0xb9,0x2c,0x2a } ; unsigned char bobpk[32] = { 0xde,0x9e,0xdb,0x7d,0x7b,0x7d,0xc1,0xb4 ,0xd3,0x5b,0x61,0xc2,0xec,0xe4,0x35,0x37 ,0x3f,0x83,0x43,0xc8,0x5b,0x78,0x67,0x4d ,0xad,0xfc,0x7e,0x14,0x6f,0x88,0x2b,0x4f } ; unsigned char k[32]; main() { int i; crypto_scalarmult_curve25519(k,alicesk,bobpk); for (i = 0;i < 32;++i) { if (i > 0) printf(","); else printf(" "); printf("0x%02x",(unsigned int) k[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/scalarmult5.out000066400000000000000000000002441150631715100221740ustar00rootroot00000000000000 0x4a,0x5d,0x9d,0x5b,0xa4,0xce,0x2d,0xe1 ,0x72,0x8e,0x3b,0xf4,0x80,0x35,0x0f,0x25 ,0xe0,0x7e,0x21,0xc9,0x47,0xd1,0x9e,0x33 ,0x76,0xf0,0x9b,0x3c,0x1e,0x16,0x17,0x42 curvedns-curvedns-0.87/nacl/tests/scalarmult6.c000066400000000000000000000013121150631715100216050ustar00rootroot00000000000000#include #include "crypto_scalarmult_curve25519.h" unsigned char bobsk[32] = { 0x5d,0xab,0x08,0x7e,0x62,0x4a,0x8a,0x4b ,0x79,0xe1,0x7f,0x8b,0x83,0x80,0x0e,0xe6 ,0x6f,0x3b,0xb1,0x29,0x26,0x18,0xb6,0xfd ,0x1c,0x2f,0x8b,0x27,0xff,0x88,0xe0,0xeb } ; unsigned char alicepk[32] = { 0x85,0x20,0xf0,0x09,0x89,0x30,0xa7,0x54 ,0x74,0x8b,0x7d,0xdc,0xb4,0x3e,0xf7,0x5a ,0x0d,0xbf,0x3a,0x0d,0x26,0x38,0x1a,0xf4 ,0xeb,0xa4,0xa9,0x8e,0xaa,0x9b,0x4e,0x6a } ; unsigned char k[32]; main() { int i; crypto_scalarmult_curve25519(k,bobsk,alicepk); for (i = 0;i < 32;++i) { if (i > 0) printf(","); else printf(" "); printf("0x%02x",(unsigned int) k[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/scalarmult6.out000066400000000000000000000002441150631715100221750ustar00rootroot00000000000000 0x4a,0x5d,0x9d,0x5b,0xa4,0xce,0x2d,0xe1 ,0x72,0x8e,0x3b,0xf4,0x80,0x35,0x0f,0x25 ,0xe0,0x7e,0x21,0xc9,0x47,0xd1,0x9e,0x33 ,0x76,0xf0,0x9b,0x3c,0x1e,0x16,0x17,0x42 curvedns-curvedns-0.87/nacl/tests/scalarmult7.cpp000066400000000000000000000015301150631715100221500ustar00rootroot00000000000000#include using std::string; #include #include "crypto_scalarmult_curve25519.h" char alicesk_bytes[32] = { 0x77,0x07,0x6d,0x0a,0x73,0x18,0xa5,0x7d ,0x3c,0x16,0xc1,0x72,0x51,0xb2,0x66,0x45 ,0xdf,0x4c,0x2f,0x87,0xeb,0xc0,0x99,0x2a ,0xb1,0x77,0xfb,0xa5,0x1d,0xb9,0x2c,0x2a } ; char bobpk_bytes[32] = { 0xde,0x9e,0xdb,0x7d,0x7b,0x7d,0xc1,0xb4 ,0xd3,0x5b,0x61,0xc2,0xec,0xe4,0x35,0x37 ,0x3f,0x83,0x43,0xc8,0x5b,0x78,0x67,0x4d ,0xad,0xfc,0x7e,0x14,0x6f,0x88,0x2b,0x4f } ; main() { int i; string alicesk(alicesk_bytes,sizeof alicesk_bytes); string bobpk(bobpk_bytes,sizeof bobpk_bytes); string k = crypto_scalarmult_curve25519(alicesk,bobpk); for (i = 0;i < k.size();++i) { if (i > 0) printf(","); else printf(" "); printf("0x%02x",(unsigned int) (unsigned char) k[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/scalarmult7.out000066400000000000000000000002441150631715100221760ustar00rootroot00000000000000 0x4a,0x5d,0x9d,0x5b,0xa4,0xce,0x2d,0xe1 ,0x72,0x8e,0x3b,0xf4,0x80,0x35,0x0f,0x25 ,0xe0,0x7e,0x21,0xc9,0x47,0xd1,0x9e,0x33 ,0x76,0xf0,0x9b,0x3c,0x1e,0x16,0x17,0x42 curvedns-curvedns-0.87/nacl/tests/secretbox.c000066400000000000000000000030521150631715100213510ustar00rootroot00000000000000#include #include "crypto_secretbox_xsalsa20poly1305.h" unsigned char firstkey[32] = { 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 
,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 } ; unsigned char nonce[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; // API requires first 32 bytes to be 0 unsigned char m[163] = { 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 ,0xbe,0x07,0x5f,0xc5,0x3c,0x81,0xf2,0xd5 ,0xcf,0x14,0x13,0x16,0xeb,0xeb,0x0c,0x7b ,0x52,0x28,0xc5,0x2a,0x4c,0x62,0xcb,0xd4 ,0x4b,0x66,0x84,0x9b,0x64,0x24,0x4f,0xfc ,0xe5,0xec,0xba,0xaf,0x33,0xbd,0x75,0x1a ,0x1a,0xc7,0x28,0xd4,0x5e,0x6c,0x61,0x29 ,0x6c,0xdc,0x3c,0x01,0x23,0x35,0x61,0xf4 ,0x1d,0xb6,0x6c,0xce,0x31,0x4a,0xdb,0x31 ,0x0e,0x3b,0xe8,0x25,0x0c,0x46,0xf0,0x6d ,0xce,0xea,0x3a,0x7f,0xa1,0x34,0x80,0x57 ,0xe2,0xf6,0x55,0x6a,0xd6,0xb1,0x31,0x8a ,0x02,0x4a,0x83,0x8f,0x21,0xaf,0x1f,0xde ,0x04,0x89,0x77,0xeb,0x48,0xf5,0x9f,0xfd ,0x49,0x24,0xca,0x1c,0x60,0x90,0x2e,0x52 ,0xf0,0xa0,0x89,0xbc,0x76,0x89,0x70,0x40 ,0xe0,0x82,0xf9,0x37,0x76,0x38,0x48,0x64 ,0x5e,0x07,0x05 } ; unsigned char c[163]; main() { int i; crypto_secretbox_xsalsa20poly1305( c,m,163,nonce,firstkey ); for (i = 16;i < 163;++i) { printf(",0x%02x",(unsigned int) c[i]); if (i % 8 == 7) printf("\n"); } printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/secretbox.out000066400000000000000000000013621150631715100217400ustar00rootroot00000000000000,0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 ,0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 curvedns-curvedns-0.87/nacl/tests/secretbox2.c000066400000000000000000000031171150631715100214350ustar00rootroot00000000000000#include #include "crypto_secretbox_xsalsa20poly1305.h" unsigned char firstkey[32] = { 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 ,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 } ; unsigned char nonce[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; // API requires first 16 bytes to be 0 unsigned char c[163] = { 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 ,0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 ,0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 } ; unsigned 
char m[163]; main() { int i; if (crypto_secretbox_xsalsa20poly1305_open( m,c,163,nonce,firstkey ) == 0) { for (i = 32;i < 163;++i) { printf(",0x%02x",(unsigned int) m[i]); if (i % 8 == 7) printf("\n"); } printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/secretbox2.out000066400000000000000000000012401150631715100220150ustar00rootroot00000000000000,0xbe,0x07,0x5f,0xc5,0x3c,0x81,0xf2,0xd5 ,0xcf,0x14,0x13,0x16,0xeb,0xeb,0x0c,0x7b ,0x52,0x28,0xc5,0x2a,0x4c,0x62,0xcb,0xd4 ,0x4b,0x66,0x84,0x9b,0x64,0x24,0x4f,0xfc ,0xe5,0xec,0xba,0xaf,0x33,0xbd,0x75,0x1a ,0x1a,0xc7,0x28,0xd4,0x5e,0x6c,0x61,0x29 ,0x6c,0xdc,0x3c,0x01,0x23,0x35,0x61,0xf4 ,0x1d,0xb6,0x6c,0xce,0x31,0x4a,0xdb,0x31 ,0x0e,0x3b,0xe8,0x25,0x0c,0x46,0xf0,0x6d ,0xce,0xea,0x3a,0x7f,0xa1,0x34,0x80,0x57 ,0xe2,0xf6,0x55,0x6a,0xd6,0xb1,0x31,0x8a ,0x02,0x4a,0x83,0x8f,0x21,0xaf,0x1f,0xde ,0x04,0x89,0x77,0xeb,0x48,0xf5,0x9f,0xfd ,0x49,0x24,0xca,0x1c,0x60,0x90,0x2e,0x52 ,0xf0,0xa0,0x89,0xbc,0x76,0x89,0x70,0x40 ,0xe0,0x82,0xf9,0x37,0x76,0x38,0x48,0x64 ,0x5e,0x07,0x05 curvedns-curvedns-0.87/nacl/tests/secretbox3.cpp000066400000000000000000000030021150631715100217670ustar00rootroot00000000000000#include using std::string; #include #include "crypto_secretbox_xsalsa20poly1305.h" char firstkey_bytes[32] = { 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 ,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 } ; char nonce_bytes[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; char m_bytes[131] = { 0xbe,0x07,0x5f,0xc5,0x3c,0x81,0xf2,0xd5 ,0xcf,0x14,0x13,0x16,0xeb,0xeb,0x0c,0x7b ,0x52,0x28,0xc5,0x2a,0x4c,0x62,0xcb,0xd4 ,0x4b,0x66,0x84,0x9b,0x64,0x24,0x4f,0xfc ,0xe5,0xec,0xba,0xaf,0x33,0xbd,0x75,0x1a ,0x1a,0xc7,0x28,0xd4,0x5e,0x6c,0x61,0x29 ,0x6c,0xdc,0x3c,0x01,0x23,0x35,0x61,0xf4 ,0x1d,0xb6,0x6c,0xce,0x31,0x4a,0xdb,0x31 ,0x0e,0x3b,0xe8,0x25,0x0c,0x46,0xf0,0x6d ,0xce,0xea,0x3a,0x7f,0xa1,0x34,0x80,0x57 ,0xe2,0xf6,0x55,0x6a,0xd6,0xb1,0x31,0x8a ,0x02,0x4a,0x83,0x8f,0x21,0xaf,0x1f,0xde ,0x04,0x89,0x77,0xeb,0x48,0xf5,0x9f,0xfd ,0x49,0x24,0xca,0x1c,0x60,0x90,0x2e,0x52 ,0xf0,0xa0,0x89,0xbc,0x76,0x89,0x70,0x40 ,0xe0,0x82,0xf9,0x37,0x76,0x38,0x48,0x64 ,0x5e,0x07,0x05 } ; main() { int i; string m(m_bytes,sizeof m_bytes); string nonce(nonce_bytes,sizeof nonce_bytes); string firstkey(firstkey_bytes,sizeof firstkey_bytes); string c = crypto_secretbox_xsalsa20poly1305(m,nonce,firstkey); for (i = 0;i < c.size();++i) { printf(",0x%02x",(unsigned int) (unsigned char) c[i]); if (i % 8 == 7) printf("\n"); } printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/secretbox3.out000066400000000000000000000013621150631715100220230ustar00rootroot00000000000000,0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 ,0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 
curvedns-curvedns-0.87/nacl/tests/secretbox4.cpp000066400000000000000000000031311150631715100217730ustar00rootroot00000000000000#include using std::string; #include #include "crypto_secretbox_xsalsa20poly1305.h" char firstkey_bytes[32] = { 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 ,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 } ; char nonce_bytes[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; char c_bytes[147] = { 0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5 ,0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9 ,0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 } ; main() { int i; string firstkey(firstkey_bytes,sizeof firstkey_bytes); string nonce(nonce_bytes,sizeof nonce_bytes); string c(c_bytes,sizeof c_bytes); string m = crypto_secretbox_xsalsa20poly1305_open(c,nonce,firstkey); for (i = 0;i < m.size();++i) { printf(",0x%02x",(unsigned int) (unsigned char) m[i]); if (i % 8 == 7) printf("\n"); } printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/secretbox4.out000066400000000000000000000012401150631715100220170ustar00rootroot00000000000000,0xbe,0x07,0x5f,0xc5,0x3c,0x81,0xf2,0xd5 ,0xcf,0x14,0x13,0x16,0xeb,0xeb,0x0c,0x7b ,0x52,0x28,0xc5,0x2a,0x4c,0x62,0xcb,0xd4 ,0x4b,0x66,0x84,0x9b,0x64,0x24,0x4f,0xfc ,0xe5,0xec,0xba,0xaf,0x33,0xbd,0x75,0x1a ,0x1a,0xc7,0x28,0xd4,0x5e,0x6c,0x61,0x29 ,0x6c,0xdc,0x3c,0x01,0x23,0x35,0x61,0xf4 ,0x1d,0xb6,0x6c,0xce,0x31,0x4a,0xdb,0x31 ,0x0e,0x3b,0xe8,0x25,0x0c,0x46,0xf0,0x6d ,0xce,0xea,0x3a,0x7f,0xa1,0x34,0x80,0x57 ,0xe2,0xf6,0x55,0x6a,0xd6,0xb1,0x31,0x8a ,0x02,0x4a,0x83,0x8f,0x21,0xaf,0x1f,0xde ,0x04,0x89,0x77,0xeb,0x48,0xf5,0x9f,0xfd ,0x49,0x24,0xca,0x1c,0x60,0x90,0x2e,0x52 ,0xf0,0xa0,0x89,0xbc,0x76,0x89,0x70,0x40 ,0xe0,0x82,0xf9,0x37,0x76,0x38,0x48,0x64 ,0x5e,0x07,0x05 curvedns-curvedns-0.87/nacl/tests/secretbox5.cpp000066400000000000000000000014631150631715100220020ustar00rootroot00000000000000#include using std::string; #include #include "crypto_secretbox.h" #include "randombytes.h" main() { int mlen; for (mlen = 0;mlen < 1000;++mlen) { unsigned char kbytes[crypto_secretbox_KEYBYTES]; randombytes(kbytes,crypto_secretbox_KEYBYTES); string k((char *) kbytes,crypto_secretbox_KEYBYTES); unsigned char nbytes[crypto_secretbox_NONCEBYTES]; randombytes(nbytes,crypto_secretbox_NONCEBYTES); string n((char *) nbytes,crypto_secretbox_NONCEBYTES); unsigned char mbytes[mlen]; randombytes(mbytes,mlen); string m((char *) mbytes,mlen); string c = crypto_secretbox(m,n,k); try { string m2 = crypto_secretbox_open(c,n,k); if (m != m2) printf("bad decryption\n"); } catch(const char *s) { printf("%s\n",s); } } return 0; } 
curvedns-curvedns-0.87/nacl/tests/secretbox5.out000066400000000000000000000000001150631715100220110ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/secretbox6.cpp000066400000000000000000000020651150631715100220020ustar00rootroot00000000000000#include <string> using std::string; #include <stdlib.h> #include <stdio.h> #include "crypto_secretbox.h" #include "randombytes.h" main() { int mlen; for (mlen = 0;mlen < 1000;++mlen) { unsigned char kbytes[crypto_secretbox_KEYBYTES]; randombytes(kbytes,crypto_secretbox_KEYBYTES); string k((char *) kbytes,crypto_secretbox_KEYBYTES); unsigned char nbytes[crypto_secretbox_NONCEBYTES]; randombytes(nbytes,crypto_secretbox_NONCEBYTES); string n((char *) nbytes,crypto_secretbox_NONCEBYTES); unsigned char mbytes[mlen]; randombytes(mbytes,mlen); string m((char *) mbytes,mlen); string c = crypto_secretbox(m,n,k); int caught = 0; while (caught < 10) { c.replace(random() % c.size(),1,1,random()); try { string m2 = crypto_secretbox_open(c,n,k); if (m != m2) { printf("forgery\n"); return 100; } } catch(const char *s) { if (string(s) == string("ciphertext fails verification")) ++caught; else { printf("%s\n",s); return 111; } } } } return 0; } curvedns-curvedns-0.87/nacl/tests/secretbox6.out000066400000000000000000000000001150631715100220120ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/secretbox7.c000066400000000000000000000015461150631715100214460ustar00rootroot00000000000000#include <stdio.h> #include "crypto_secretbox.h" #include "randombytes.h" unsigned char k[crypto_secretbox_KEYBYTES]; unsigned char n[crypto_secretbox_NONCEBYTES]; unsigned char m[10000]; unsigned char c[10000]; unsigned char m2[10000]; main() { int mlen; int i; for (mlen = 0;mlen < 1000 && mlen + crypto_secretbox_ZEROBYTES < sizeof m;++mlen) { randombytes(k,crypto_secretbox_KEYBYTES); randombytes(n,crypto_secretbox_NONCEBYTES); randombytes(m + crypto_secretbox_ZEROBYTES,mlen); crypto_secretbox(c,m,mlen + crypto_secretbox_ZEROBYTES,n,k); if (crypto_secretbox_open(m2,c,mlen + crypto_secretbox_ZEROBYTES,n,k) == 0) { for (i = 0;i < mlen + crypto_secretbox_ZEROBYTES;++i) if (m2[i] != m[i]) { printf("bad decryption\n"); break; } } else { printf("ciphertext fails verification\n"); } } return 0; } curvedns-curvedns-0.87/nacl/tests/secretbox7.out000066400000000000000000000000001150631715100220130ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/secretbox8.c000066400000000000000000000017271150631715100214500ustar00rootroot00000000000000#include <stdio.h> #include "crypto_secretbox.h" #include "randombytes.h" unsigned char k[crypto_secretbox_KEYBYTES]; unsigned char n[crypto_secretbox_NONCEBYTES]; unsigned char m[10000]; unsigned char c[10000]; unsigned char m2[10000]; main() { int mlen; int i; int caught; for (mlen = 0;mlen < 1000 && mlen + crypto_secretbox_ZEROBYTES < sizeof m;++mlen) { randombytes(k,crypto_secretbox_KEYBYTES); randombytes(n,crypto_secretbox_NONCEBYTES); randombytes(m + crypto_secretbox_ZEROBYTES,mlen); crypto_secretbox(c,m,mlen + crypto_secretbox_ZEROBYTES,n,k); caught = 0; while (caught < 10) { c[random() % (mlen + crypto_secretbox_ZEROBYTES)] = random(); if (crypto_secretbox_open(m2,c,mlen + crypto_secretbox_ZEROBYTES,n,k) == 0) { for (i = 0;i < mlen + crypto_secretbox_ZEROBYTES;++i) if (m2[i] != m[i]) { printf("forgery\n"); return 100; } } else { ++caught; } } } return 0; }
curvedns-curvedns-0.87/nacl/tests/secretbox8.out000066400000000000000000000000001150631715100220140ustar00rootroot00000000000000curvedns-curvedns-0.87/nacl/tests/stream.c000066400000000000000000000012651150631715100206520ustar00rootroot00000000000000#include #include "crypto_stream_xsalsa20.h" #include "crypto_hash_sha256.h" unsigned char firstkey[32] = { 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 ,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 } ; unsigned char nonce[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; unsigned char output[4194304]; unsigned char h[32]; main() { int i; crypto_stream_xsalsa20(output,4194304,nonce,firstkey); crypto_hash_sha256(h,output,sizeof output); for (i = 0;i < 32;++i) printf("%02x",h[i]); printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/stream.out000066400000000000000000000001011150631715100212230ustar00rootroot00000000000000662b9d0e3463029156069b12f918691a98f7dfb2ca0393c96bbfc6b1fbd630a2 curvedns-curvedns-0.87/nacl/tests/stream2.c000066400000000000000000000011561150631715100207330ustar00rootroot00000000000000#include #include "crypto_stream_salsa20.h" #include "crypto_hash_sha256.h" unsigned char secondkey[32] = { 0xdc,0x90,0x8d,0xda,0x0b,0x93,0x44,0xa9 ,0x53,0x62,0x9b,0x73,0x38,0x20,0x77,0x88 ,0x80,0xf3,0xce,0xb4,0x21,0xbb,0x61,0xb9 ,0x1c,0xbd,0x4c,0x3e,0x66,0x25,0x6c,0xe4 } ; unsigned char noncesuffix[8] = { 0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; unsigned char output[4194304]; unsigned char h[32]; main() { int i; crypto_stream_salsa20(output,4194304,noncesuffix,secondkey); crypto_hash_sha256(h,output,sizeof output); for (i = 0;i < 32;++i) printf("%02x",h[i]); printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/stream2.out000066400000000000000000000001011150631715100213050ustar00rootroot00000000000000662b9d0e3463029156069b12f918691a98f7dfb2ca0393c96bbfc6b1fbd630a2 curvedns-curvedns-0.87/nacl/tests/stream3.c000066400000000000000000000011601150631715100207270ustar00rootroot00000000000000#include #include "crypto_stream_xsalsa20.h" unsigned char firstkey[32] = { 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 ,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 } ; unsigned char nonce[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; unsigned char rs[32]; main() { int i; crypto_stream_xsalsa20(rs,32,nonce,firstkey); for (i = 0;i < 32;++i) { printf(",0x%02x",(unsigned int) rs[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/stream3.out000066400000000000000000000002441150631715100213160ustar00rootroot00000000000000,0xee,0xa6,0xa7,0x25,0x1c,0x1e,0x72,0x91 ,0x6d,0x11,0xc2,0xcb,0x21,0x4d,0x3c,0x25 ,0x25,0x39,0x12,0x1d,0x8e,0x23,0x4e,0x65 ,0x2d,0x65,0x1f,0xa4,0xc8,0xcf,0xf8,0x80 curvedns-curvedns-0.87/nacl/tests/stream4.c000066400000000000000000000027511150631715100207370ustar00rootroot00000000000000#include #include "crypto_stream_xsalsa20.h" unsigned char firstkey[32] = { 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 ,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 } ; unsigned char nonce[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; unsigned char 
m[163] = { 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 ,0xbe,0x07,0x5f,0xc5,0x3c,0x81,0xf2,0xd5 ,0xcf,0x14,0x13,0x16,0xeb,0xeb,0x0c,0x7b ,0x52,0x28,0xc5,0x2a,0x4c,0x62,0xcb,0xd4 ,0x4b,0x66,0x84,0x9b,0x64,0x24,0x4f,0xfc ,0xe5,0xec,0xba,0xaf,0x33,0xbd,0x75,0x1a ,0x1a,0xc7,0x28,0xd4,0x5e,0x6c,0x61,0x29 ,0x6c,0xdc,0x3c,0x01,0x23,0x35,0x61,0xf4 ,0x1d,0xb6,0x6c,0xce,0x31,0x4a,0xdb,0x31 ,0x0e,0x3b,0xe8,0x25,0x0c,0x46,0xf0,0x6d ,0xce,0xea,0x3a,0x7f,0xa1,0x34,0x80,0x57 ,0xe2,0xf6,0x55,0x6a,0xd6,0xb1,0x31,0x8a ,0x02,0x4a,0x83,0x8f,0x21,0xaf,0x1f,0xde ,0x04,0x89,0x77,0xeb,0x48,0xf5,0x9f,0xfd ,0x49,0x24,0xca,0x1c,0x60,0x90,0x2e,0x52 ,0xf0,0xa0,0x89,0xbc,0x76,0x89,0x70,0x40 ,0xe0,0x82,0xf9,0x37,0x76,0x38,0x48,0x64 ,0x5e,0x07,0x05 } ; unsigned char c[163]; main() { int i; crypto_stream_xsalsa20_xor(c,m,163,nonce,firstkey); for (i = 32;i < 163;++i) { printf(",0x%02x",(unsigned int) c[i]); if (i % 8 == 7) printf("\n"); } printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/stream4.out000066400000000000000000000012401150631715100213140ustar00rootroot00000000000000,0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b ,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 curvedns-curvedns-0.87/nacl/tests/stream5.cpp000066400000000000000000000014521150631715100212750ustar00rootroot00000000000000#include using std::string; #include #include "crypto_stream_xsalsa20.h" #include "crypto_hash_sha256.h" char firstkey_bytes[32] = { 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 ,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 } ; char nonce_bytes[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; main() { int i; string firstkey(firstkey_bytes,sizeof firstkey_bytes); string nonce(nonce_bytes,sizeof nonce_bytes); string output = crypto_stream_xsalsa20(4194304,nonce,firstkey); string h = crypto_hash_sha256(output); for (i = 0;i < 32;++i) printf("%02x",(unsigned int) (unsigned char) h[i]); printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/stream5.out000066400000000000000000000001011150631715100213100ustar00rootroot00000000000000662b9d0e3463029156069b12f918691a98f7dfb2ca0393c96bbfc6b1fbd630a2 curvedns-curvedns-0.87/nacl/tests/stream6.cpp000066400000000000000000000013701150631715100212750ustar00rootroot00000000000000#include using std::string; #include #include "crypto_stream_salsa20.h" #include "crypto_hash_sha256.h" char secondkey_bytes[32] = { 0xdc,0x90,0x8d,0xda,0x0b,0x93,0x44,0xa9 ,0x53,0x62,0x9b,0x73,0x38,0x20,0x77,0x88 ,0x80,0xf3,0xce,0xb4,0x21,0xbb,0x61,0xb9 ,0x1c,0xbd,0x4c,0x3e,0x66,0x25,0x6c,0xe4 } ; char noncesuffix_bytes[8] = { 0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; main() { int i; string secondkey(secondkey_bytes,sizeof secondkey_bytes); string noncesuffix(noncesuffix_bytes,sizeof noncesuffix_bytes); string output = 
crypto_stream_salsa20(4194304,noncesuffix,secondkey); string h = crypto_hash_sha256(output); for (i = 0;i < 32;++i) printf("%02x",(unsigned int) (unsigned char) h[i]); printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/stream6.out000066400000000000000000000001011150631715100213110ustar00rootroot00000000000000662b9d0e3463029156069b12f918691a98f7dfb2ca0393c96bbfc6b1fbd630a2 curvedns-curvedns-0.87/nacl/tests/stream7.cpp000066400000000000000000000014011150631715100212710ustar00rootroot00000000000000#include using std::string; #include #include "crypto_stream_xsalsa20.h" char firstkey_bytes[32] = { 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 ,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 } ; char nonce_bytes[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; main() { int i; string firstkey(firstkey_bytes,sizeof firstkey_bytes); string nonce(nonce_bytes,sizeof nonce_bytes); string rs = crypto_stream_xsalsa20(32,nonce,firstkey); for (i = 0;i < rs.size();++i) { printf(",0x%02x",(unsigned int) (unsigned char) rs[i]); if (i % 8 == 7) printf("\n"); } return 0; } curvedns-curvedns-0.87/nacl/tests/stream7.out000066400000000000000000000002441150631715100213220ustar00rootroot00000000000000,0xee,0xa6,0xa7,0x25,0x1c,0x1e,0x72,0x91 ,0x6d,0x11,0xc2,0xcb,0x21,0x4d,0x3c,0x25 ,0x25,0x39,0x12,0x1d,0x8e,0x23,0x4e,0x65 ,0x2d,0x65,0x1f,0xa4,0xc8,0xcf,0xf8,0x80 curvedns-curvedns-0.87/nacl/tests/stream8.cpp000066400000000000000000000032251150631715100213000ustar00rootroot00000000000000#include using std::string; #include #include "crypto_stream_xsalsa20.h" char firstkey_bytes[32] = { 0x1b,0x27,0x55,0x64,0x73,0xe9,0x85,0xd4 ,0x62,0xcd,0x51,0x19,0x7a,0x9a,0x46,0xc7 ,0x60,0x09,0x54,0x9e,0xac,0x64,0x74,0xf2 ,0x06,0xc4,0xee,0x08,0x44,0xf6,0x83,0x89 } ; char nonce_bytes[24] = { 0x69,0x69,0x6e,0xe9,0x55,0xb6,0x2b,0x73 ,0xcd,0x62,0xbd,0xa8,0x75,0xfc,0x73,0xd6 ,0x82,0x19,0xe0,0x03,0x6b,0x7a,0x0b,0x37 } ; char m_bytes[163] = { 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0 ,0xbe,0x07,0x5f,0xc5,0x3c,0x81,0xf2,0xd5 ,0xcf,0x14,0x13,0x16,0xeb,0xeb,0x0c,0x7b ,0x52,0x28,0xc5,0x2a,0x4c,0x62,0xcb,0xd4 ,0x4b,0x66,0x84,0x9b,0x64,0x24,0x4f,0xfc ,0xe5,0xec,0xba,0xaf,0x33,0xbd,0x75,0x1a ,0x1a,0xc7,0x28,0xd4,0x5e,0x6c,0x61,0x29 ,0x6c,0xdc,0x3c,0x01,0x23,0x35,0x61,0xf4 ,0x1d,0xb6,0x6c,0xce,0x31,0x4a,0xdb,0x31 ,0x0e,0x3b,0xe8,0x25,0x0c,0x46,0xf0,0x6d ,0xce,0xea,0x3a,0x7f,0xa1,0x34,0x80,0x57 ,0xe2,0xf6,0x55,0x6a,0xd6,0xb1,0x31,0x8a ,0x02,0x4a,0x83,0x8f,0x21,0xaf,0x1f,0xde ,0x04,0x89,0x77,0xeb,0x48,0xf5,0x9f,0xfd ,0x49,0x24,0xca,0x1c,0x60,0x90,0x2e,0x52 ,0xf0,0xa0,0x89,0xbc,0x76,0x89,0x70,0x40 ,0xe0,0x82,0xf9,0x37,0x76,0x38,0x48,0x64 ,0x5e,0x07,0x05 } ; main() { int i; string firstkey(firstkey_bytes,sizeof firstkey_bytes); string nonce(nonce_bytes,sizeof nonce_bytes); string m(m_bytes,sizeof m_bytes); string c = crypto_stream_xsalsa20_xor(m,nonce,firstkey); for (i = 32;i < c.size();++i) { printf(",0x%02x",(unsigned int) (unsigned char) c[i]); if (i % 8 == 7) printf("\n"); } printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/tests/stream8.out000066400000000000000000000012401150631715100213200ustar00rootroot00000000000000,0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73 ,0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce ,0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4 ,0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a ,0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b 
,0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72 ,0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2 ,0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38 ,0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a ,0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae ,0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea ,0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda ,0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde ,0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3 ,0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6 ,0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74 ,0xe3,0x55,0xa5 curvedns-curvedns-0.87/nacl/try-anything.c000066400000000000000000000072531150631715100206550ustar00rootroot00000000000000/* * try-anything.c version 20090215 * D. J. Bernstein * Public domain. */ #include #include #include #include #include #include #include #include #include "cpucycles.h" typedef int uint32; static uint32 seed[32] = { 3,1,4,1,5,9,2,6,5,3,5,8,9,7,9,3,2,3,8,4,6,2,6,4,3,3,8,3,2,7,9,5 } ; static uint32 in[12]; static uint32 out[8]; static int outleft = 0; #define ROTATE(x,b) (((x) << (b)) | ((x) >> (32 - (b)))) #define MUSH(i,b) x = t[i] += (((x ^ seed[i]) + sum) ^ ROTATE(x,b)); static void surf(void) { uint32 t[12]; uint32 x; uint32 sum = 0; int r; int i; int loop; for (i = 0;i < 12;++i) t[i] = in[i] ^ seed[12 + i]; for (i = 0;i < 8;++i) out[i] = seed[24 + i]; x = t[11]; for (loop = 0;loop < 2;++loop) { for (r = 0;r < 16;++r) { sum += 0x9e3779b9; MUSH(0,5) MUSH(1,7) MUSH(2,9) MUSH(3,13) MUSH(4,5) MUSH(5,7) MUSH(6,9) MUSH(7,13) MUSH(8,5) MUSH(9,7) MUSH(10,9) MUSH(11,13) } for (i = 0;i < 8;++i) out[i] ^= t[i + 4]; } } void randombytes(unsigned char *x,unsigned long long xlen) { while (xlen > 0) { if (!outleft) { if (!++in[0]) if (!++in[1]) if (!++in[2]) ++in[3]; surf(); outleft = 8; } *x = out[--outleft]; ++x; --xlen; } } extern void preallocate(void); extern void allocate(void); extern void predoit(void); extern void doit(void); extern char checksum[]; extern const char *checksum_compute(void); extern const char *primitiveimplementation; static void printword(const char *s) { if (!*s) putchar('-'); while (*s) { if (*s == ' ') putchar('_'); else if (*s == '\t') putchar('_'); else if (*s == '\r') putchar('_'); else if (*s == '\n') putchar('_'); else putchar(*s); ++s; } putchar(' '); } static void printnum(long long x) { printf("%lld ",x); } static void fail(const char *why) { printf("%s\n",why); exit(111); } unsigned char *alignedcalloc(unsigned long long len) { unsigned char *x = (unsigned char *) calloc(1,len + 256); long long i; if (!x) fail("out of memory"); /* will never deallocate so shifting is ok */ for (i = 0;i < len + 256;++i) x[i] = random(); x += 64; x += 63 & (-(unsigned long) x); for (i = 0;i < len;++i) x[i] = 0; return x; } #define TIMINGS 63 static long long cycles[TIMINGS + 1]; void limits() { #ifdef RLIM_INFINITY struct rlimit r; r.rlim_cur = 0; r.rlim_max = 0; #ifdef RLIMIT_NOFILE setrlimit(RLIMIT_NOFILE,&r); #endif #ifdef RLIMIT_NPROC setrlimit(RLIMIT_NPROC,&r); #endif #ifdef RLIMIT_CORE setrlimit(RLIMIT_CORE,&r); #endif #endif } int main() { long long i; long long j; long long abovej; long long belowj; long long checksumcycles; long long cyclespersecond; const char *problem; cyclespersecond = cpucycles_persecond(); preallocate(); limits(); allocate(); srandom(getpid()); cycles[0] = cpucycles(); problem = checksum_compute(); if (problem) fail(problem); cycles[1] = cpucycles(); checksumcycles = cycles[1] - cycles[0]; predoit(); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); } for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); doit(); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 
1] - cycles[i]; for (j = 0;j < TIMINGS;++j) { belowj = 0; for (i = 0;i < TIMINGS;++i) if (cycles[i] < cycles[j]) ++belowj; abovej = 0; for (i = 0;i < TIMINGS;++i) if (cycles[i] > cycles[j]) ++abovej; if (belowj * 2 < TIMINGS && abovej * 2 < TIMINGS) break; } printword(checksum); printnum(cycles[j]); printnum(checksumcycles); printnum(cyclespersecond); printword(primitiveimplementation); printf("\n"); return 0; } curvedns-curvedns-0.87/nacl/version000066400000000000000000000000111150631715100174450ustar00rootroot0000000000000020100830
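The scalarmult test programs above pin down NaCl's Curve25519 primitives: crypto_scalarmult_curve25519_base turns a 32-byte secret key into a public key, and crypto_scalarmult_curve25519 combines one side's secret key with the other side's public key, so both parties compute the same shared secret (scalarmult5.c and scalarmult6.c verify exactly this against fixed vectors). A minimal standalone sketch of that round trip, assuming the NaCl headers and library built by configure.nacl are on the include and library paths (this file is not part of the archive above; all variable names are illustrative):

#include <stdio.h>
#include <string.h>
#include "crypto_scalarmult_curve25519.h"
#include "randombytes.h"

int main(void)
{
  unsigned char ask[32], apk[32];  /* Alice: secret and public key */
  unsigned char bsk[32], bpk[32];  /* Bob: secret and public key */
  unsigned char k1[32], k2[32];    /* shared secret as computed by each side */

  randombytes(ask, sizeof ask);    /* random 32-byte secret keys */
  randombytes(bsk, sizeof bsk);

  crypto_scalarmult_curve25519_base(apk, ask);  /* apk = ask * base point */
  crypto_scalarmult_curve25519_base(bpk, bsk);

  crypto_scalarmult_curve25519(k1, ask, bpk);   /* Alice's view of the shared secret */
  crypto_scalarmult_curve25519(k2, bsk, apk);   /* Bob's view */

  printf("%s\n", memcmp(k1, k2, sizeof k1) == 0 ? "shared secrets match" : "mismatch");
  return 0;
}

The same calls appear with hard-coded keys in scalarmult.c through scalarmult7.cpp above; the only difference here is that the keys come from randombytes instead of fixed test vectors.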