cm256cc-1.1.0/.gitattributes
# Auto detect text files and perform LF normalization
* text=auto
# Custom for Visual Studio
*.cs diff=csharp
# Standard to msysgit
*.doc diff=astextplain
*.DOC diff=astextplain
*.docx diff=astextplain
*.DOCX diff=astextplain
*.dot diff=astextplain
*.DOT diff=astextplain
*.pdf diff=astextplain
*.PDF diff=astextplain
*.rtf diff=astextplain
*.RTF diff=astextplain
cm256cc-1.1.0/.gitignore
# Windows image file caches
Thumbs.db
ehthumbs.db
# Folder config file
Desktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msm
*.msp
# Windows shortcuts
*.lnk
# MSVC temp files
*.obj
*.log
*.ilk
*.pdb
*.tlog
*.idb
*.opensdf
*.sdf
*.user
*.suo
# =========================
# Operating System Files
# =========================
# OSX
# =========================
.DS_Store
.AppleDouble
.LSOverride
# Thumbnails
._*
# Files that might appear on external disk
.Spotlight-V100
.Trashes
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
.cproject
.project
build/
builds/
# CLion project directory
.idea
cm256cc-1.1.0/CMakeLists.txt
cmake_minimum_required(VERSION 3.0)
# use, i.e. don't skip the full RPATH for the build tree
set(CMAKE_SKIP_BUILD_RPATH FALSE)
# when building, don't use the install RPATH already
# (but later on when installing)
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib")
# add the automatically determined parts of the RPATH
# which point to directories outside the build tree to the install RPATH
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
project(cm256cc)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
set(MAJOR_VERSION 1)
set(MINOR_VERSION 1)
set(PATCH_VERSION 0)
set(PACKAGE libcm256cc)
set(VERSION_STRING ${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION})
set(VERSION ${VERSION_STRING})
option(BUILD_TOOLS "Build unit test tools" ON)
include(GNUInstallDirs)
set(LIB_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}") # "lib" or "lib64"
if (BUILD_TYPE MATCHES RELEASE)
set(CMAKE_BUILD_TYPE "Release")
elseif (BUILD_TYPE MATCHES RELEASEWITHDBGINFO)
set(CMAKE_BUILD_TYPE "ReleaseWithDebugInfo")
elseif (BUILD_TYPE MATCHES DEBUG)
set(CMAKE_BUILD_TYPE "Debug")
else()
set(CMAKE_BUILD_TYPE "Release")
endif()
##############################################################################
set(TEST_DIR ${PROJECT_SOURCE_DIR}/cmake/test)
# Clang or AppleClang (see CMP0025)
if(NOT DEFINED C_CLANG AND CMAKE_CXX_COMPILER_ID MATCHES "Clang")
set(C_CLANG 1)
endif()
if(NOT DEFINED C_GCC AND CMAKE_CXX_COMPILER_ID MATCHES "GNU")
set(C_GCC 1)
endif()
# Detect current compilation architecture and create standard definitions
# =======================================================================
include(CheckSymbolExists)
function(detect_architecture symbol arch)
if (NOT DEFINED ARCHITECTURE)
set(CMAKE_REQUIRED_QUIET 1)
check_symbol_exists("${symbol}" "" ARCHITECTURE_${arch})
unset(CMAKE_REQUIRED_QUIET)
# The output variable needs to be unique across invocations otherwise
# CMake's crazy scope rules will keep it defined
if (ARCHITECTURE_${arch})
set(ARCHITECTURE "${arch}" PARENT_SCOPE)
set(ARCHITECTURE_${arch} 1 PARENT_SCOPE)
add_definitions(-DARCHITECTURE_${arch}=1)
endif()
endif()
endfunction()
if (NOT ENABLE_GENERIC)
if (MSVC)
detect_architecture("_M_AMD64" x86_64)
detect_architecture("_M_IX86" x86)
detect_architecture("_M_ARM" ARM)
detect_architecture("_M_ARM64" ARM64)
else()
detect_architecture("__x86_64__" x86_64)
detect_architecture("__i386__" x86)
detect_architecture("__arm__" ARM)
detect_architecture("__aarch64__" ARM64)
endif()
endif()
if (NOT DEFINED ARCHITECTURE)
set(ARCHITECTURE "GENERIC")
set(ARCHITECTURE_GENERIC 1)
add_definitions(-DARCHITECTURE_GENERIC=1)
endif()
message(STATUS "Target architecture: ${ARCHITECTURE}")
# flags that set the minimum CPU requirements,
# used to create a re-distributable binary
if (ENABLE_DISTRIBUTION)
if (${ARCHITECTURE} MATCHES "x86_64|x86")
set(HAS_SSSE3 ON CACHE BOOL "SSSE3 SIMD enabled")
if(C_GCC OR C_CLANG)
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mssse3" )
message(STATUS "Use SSSE3 SIMD instructions")
add_definitions(-DUSE_SSSE3)
elseif(MSVC)
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /arch:SSSE3" )
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oi /GL /Ot /Ox /arch:SSSE3" )
set( CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG" )
message(STATUS "Use MSVC SSSE3 SIMD instructions")
add_definitions (/D "_CRT_SECURE_NO_WARNINGS")
add_definitions(-DUSE_SSSE3)
endif()
elseif (${ARCHITECTURE} MATCHES "ARM|ARM64")
set(HAS_NEON ON CACHE BOOL "NEON SIMD enabled")
message(STATUS "Use NEON SIMD instructions")
add_definitions(-DUSE_NEON)
endif()
else ()
if (${ARCHITECTURE} MATCHES "x86_64|x86")
try_run(RUN_SSE2 COMPILE_SSE2 ${CMAKE_BINARY_DIR}/tmp ${TEST_DIR}/test_x86_sse2.cxx COMPILE_DEFINITIONS -msse2 -O0)
if(COMPILE_SSE2 AND RUN_SSE2 EQUAL 0)
set(HAS_SSE2 ON CACHE BOOL "Architecture has SSE2 SIMD enabled")
if(C_GCC OR C_CLANG)
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse2" )
message(STATUS "Use SSE2 SIMD instructions")
add_definitions(-DUSE_SSE2)
elseif(MSVC)
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /arch:SSE2" )
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oi /GL /Ot /Ox /arch:SSE2" )
set( CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG" )
add_definitions (/D "_CRT_SECURE_NO_WARNINGS")
add_definitions(-DUSE_SSE2)
endif()
else()
set(HAS_SSE2 OFF CACHE BOOL "Architecture does not have SSE2 SIMD enabled")
endif()
try_run(RUN_SSSE3 COMPILE_SSSE3 ${CMAKE_BINARY_DIR}/tmp ${TEST_DIR}/test_x86_ssse3.cxx COMPILE_DEFINITIONS -mssse3 -O0)
if(COMPILE_SSSE3 AND RUN_SSSE3 EQUAL 0)
set(HAS_SSSE3 ON CACHE BOOL "Architecture has SSSE3 SIMD enabled")
if(C_GCC OR C_CLANG)
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mssse3" )
message(STATUS "Use SSSE3 SIMD instructions")
add_definitions(-DUSE_SSSE3)
elseif(MSVC)
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /arch:SSSE3" )
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oi /GL /Ot /Ox /arch:SSSE3" )
set( CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG" )
message(STATUS "Use MSVC SSSE3 SIMD instructions")
add_definitions (/D "_CRT_SECURE_NO_WARNINGS")
add_definitions(-DUSE_SSSE3)
endif()
else()
set(HAS_SSSE3 OFF CACHE BOOL "Architecture does not have SSSE3 SIMD enabled")
endif()
try_run(RUN_SSE4_1 COMPILE_SSE4_1 ${CMAKE_BINARY_DIR}/tmp ${TEST_DIR}/test_x86_sse41.cxx COMPILE_DEFINITIONS -msse4.1 -O0)
if(COMPILE_SSE4_1 AND RUN_SSE4_1 EQUAL 0)
set(HAS_SSE4_1 ON CACHE BOOL "Architecture has SSE 4.1 SIMD enabled")
if(C_GCC OR C_CLANG)
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -msse4.1" )
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -msse4.1" )
message(STATUS "Use SSE 4.1 SIMD instructions")
add_definitions(-DUSE_SSE4_1)
elseif(MSVC)
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /arch:SSE4_1" )
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oi /GL /Ot /Ox /arch:SSE4_1" )
set( CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG" )
add_definitions (/D "_CRT_SECURE_NO_WARNINGS")
add_definitions(-DUSE_SSE4_1)
endif()
else()
set(HAS_SSE4_1 OFF CACHE BOOL "Architecture does not have SSE 4.1 SIMD enabled")
endif()
try_run(RUN_SSE4_2 COMPILE_SSE4_2 ${CMAKE_BINARY_DIR}/tmp ${TEST_DIR}/test_x86_sse42.cxx COMPILE_DEFINITIONS -msse4.2 -O0)
if(COMPILE_SSE4_2 AND RUN_SSE4_2 EQUAL 0)
set(HAS_SSE4_2 ON CACHE BOOL "Architecture has SSE 4.2 SIMD enabled")
if(C_GCC OR C_CLANG)
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -msse4.2" )
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -msse4.2" )
message(STATUS "Use SSE 4.2 SIMD instructions")
add_definitions(-DUSE_SSE4_2)
elseif(MSVC)
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /arch:SSE4_2" )
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oi /GL /Ot /Ox /arch:SSE4_2" )
set( CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG" )
add_definitions (/D "_CRT_SECURE_NO_WARNINGS")
add_definitions(-DUSE_SSE4_2)
endif()
else()
set(HAS_SSE4_2 OFF CACHE BOOL "Architecture does not have SSE 4.2 SIMD enabled")
endif()
try_run(RUN_AVX COMPILE_AVX ${CMAKE_BINARY_DIR}/tmp ${TEST_DIR}/test_x86_avx.cxx COMPILE_DEFINITIONS -mavx -O0)
if(COMPILE_AVX AND RUN_AVX EQUAL 0)
set(HAS_AVX ON CACHE BOOL "Architecture has AVX SIMD enabled")
if(C_GCC OR C_CLANG)
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -mavx" )
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -mavx" )
message(STATUS "Use AVX SIMD instructions")
add_definitions(-DUSE_AVX)
endif()
else()
set(HAS_AVX OFF CACHE BOOL "Architecture does not have AVX SIMD enabled")
endif()
try_run(RUN_AVX2 COMPILE_AVX2 ${CMAKE_BINARY_DIR}/tmp ${TEST_DIR}/test_x86_avx2.cxx COMPILE_DEFINITIONS -mavx2 -O0)
if(COMPILE_AVX2 AND RUN_AVX2 EQUAL 0)
set(HAS_AVX2 ON CACHE BOOL "Architecture has AVX2 SIMD enabled")
if(C_GCC OR C_CLANG)
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -mavx2" )
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -mavx2" )
message(STATUS "Use AVX2 SIMD instructions")
add_definitions(-DUSE_AVX2)
endif()
else()
set(HAS_AVX2 OFF CACHE BOOL "Architecture does not have AVX2 SIMD enabled")
endif()
try_run(RUN_AVX512 COMPILE_AVX512 ${CMAKE_BINARY_DIR}/tmp ${TEST_DIR}/test_x86_avx512.cxx COMPILE_DEFINITIONS -mavx512f -O0)
if(COMPILE_AVX512 AND RUN_AVX512 EQUAL 0)
set(HAS_AVX512 ON CACHE BOOL "Architecture has AVX512 SIMD enabled")
if(C_GCC OR C_CLANG)
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -mavx512f" )
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -mavx512f" )
message(STATUS "Use AVX512 SIMD instructions")
add_definitions(-DUSE_AVX512)
endif()
else()
set(HAS_AVX512 OFF CACHE BOOL "Architecture does not have AVX512 SIMD enabled")
endif()
elseif(ARCHITECTURE_ARM)
try_run(RUN_NEON COMPILE_NEON ${CMAKE_BINARY_DIR}/tmp ${TEST_DIR}/test_arm_neon.cxx COMPILE_DEFINITIONS -mfpu=neon -O0)
if(COMPILE_NEON AND RUN_NEON EQUAL 0)
set(HAS_NEON ON CACHE BOOL "Architecture has NEON SIMD enabled")
if(C_GCC OR C_CLANG)
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -mfpu=neon" )
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -mfpu=neon" )
message(STATUS "Use NEON SIMD instructions")
add_definitions(-DUSE_NEON)
endif()
else()
set(HAS_NEON OFF CACHE BOOL "Architecture does not have NEON SIMD enabled")
endif()
elseif(ARCHITECTURE_ARM64)
# Advanced SIMD (aka NEON) is mandatory for AArch64
set(HAS_NEON ON CACHE BOOL "Architecture has NEON SIMD enabled")
message(STATUS "Use NEON SIMD instructions")
add_definitions(-DUSE_NEON)
endif()
endif()
# clear binary test folder
FILE(REMOVE_RECURSE ${CMAKE_BINARY_DIR}/tmp)
##############################################################################
if(HAS_SSSE3)
message(STATUS "Architecture supports SSSE3 - OK")
elseif(HAS_NEON)
message(STATUS "Architecture supports Neon - OK")
else()
message(STATUS "Unsupported architecture - Terminated")
return()
endif()
# Compiler flags.
if(MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall ${EXTRA_FLAGS}")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -O3 -ffast-math -ftree-vectorize ${EXTRA_FLAGS}")
endif()
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fmax-errors=10")
endif()
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -std=c++11" )
set( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -std=c++11" )
add_definitions(-DNO_RESTRICT)
set(cm256_SOURCES
cm256.cpp
gf256.cpp
)
set(cm256_HEADERS
cm256.h
gf256.h
sse2neon.h
export.h
)
include_directories(
.
${CMAKE_CURRENT_BINARY_DIR}
${Boost_INCLUDE_DIRS}
)
add_library(cm256cc SHARED
${cm256_SOURCES}
)
set_target_properties(cm256cc PROPERTIES VERSION ${VERSION} SOVERSION ${MAJOR_VERSION})
# single pass test
if(BUILD_TOOLS)
add_executable(cm256_test
unit_test/cm256_test.cpp
)
target_include_directories(cm256_test PUBLIC
${PROJECT_SOURCE_DIR}
${CMAKE_CURRENT_BINARY_DIR}
)
target_link_libraries(cm256_test cm256cc)
# transmit side test
add_executable(cm256_tx
unit_test/mainutils.cpp
unit_test/UDPSocket.cpp
unit_test/example0.cpp
unit_test/example1.cpp
unit_test/transmit.cpp
)
target_include_directories(cm256_tx PUBLIC
${PROJECT_SOURCE_DIR}
${CMAKE_CURRENT_BINARY_DIR}
)
target_link_libraries(cm256_tx cm256cc)
# receive side test
add_executable(cm256_rx
unit_test/mainutils.cpp
unit_test/UDPSocket.cpp
unit_test/example0.cpp
unit_test/example1.cpp
unit_test/receive.cpp
)
target_include_directories(cm256_rx PUBLIC
${PROJECT_SOURCE_DIR}
${CMAKE_CURRENT_BINARY_DIR}
)
target_link_libraries(cm256_rx cm256cc)
endif(BUILD_TOOLS)
########################################################################
# Create Pkg Config File
########################################################################
# use space-separation format for the pc file
STRING(REPLACE ";" " " CM256CC_PC_REQUIRES "${CM256CC_PC_REQUIRES}")
STRING(REPLACE ";" " " CM256CC_PC_CFLAGS "${CM256CC_PC_CFLAGS}")
STRING(REPLACE ";" " " CM256CC_PC_LIBS "${CM256CC_PC_LIBS}")
# unset these vars to avoid hard-coded paths to cross environment
IF(CMAKE_CROSSCOMPILING)
UNSET(CM256CC_PC_CFLAGS)
UNSET(CM256CC_PC_LIBS)
ENDIF(CMAKE_CROSSCOMPILING)
CONFIGURE_FILE(
${CMAKE_CURRENT_SOURCE_DIR}/libcm256cc.pc.in
${CMAKE_CURRENT_BINARY_DIR}/libcm256cc.pc
@ONLY)
INSTALL(
FILES ${CMAKE_CURRENT_BINARY_DIR}/libcm256cc.pc
DESTINATION ${LIB_INSTALL_DIR}/pkgconfig
)
# Installation
if(BUILD_TOOLS)
install(TARGETS cm256_test cm256_tx cm256_rx DESTINATION bin)
endif(BUILD_TOOLS)
install(TARGETS cm256cc DESTINATION ${LIB_INSTALL_DIR})
install(FILES ${cm256_HEADERS} DESTINATION include/${PROJECT_NAME})
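# Note (illustrative, not part of the build logic): once installed, a downstream
# build can locate the library through the generated pkg-config file, e.g. with
#   pkg-config --cflags --libs libcm256cc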
cm256cc-1.1.0/LICENSE
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
cm256cc-1.1.0/README.md
# cm256cc
Fast GF(256) Cauchy MDS Block Erasure Codec in C++
This is a rewrite of [cm256](https://github.com/f4exb/cm256) in (as much as possible) clean C++. In some contexts, such as Qt programs and plugins, the original cm256 library does not work.
cm256cc's performance is on par with, or even better than, cm256. This is particularly true on the armv7 architecture (Raspberry Pi 2 and 3), with the most significant gains on the Raspberry Pi 2.
cm256cc is a simple library for erasure codes. From given data it generates
redundant data that can be used to recover the originals.
Currently only g++ is supported; MSVC versions other than Visual Studio 2013 may work. Optimizations are available for both SSSE3 (x86_64) and NEON (armv7).
The original data should be split up into equally-sized chunks. If one of these chunks
is erased, the redundant data can fill in the gap through decoding.
The erasure code is parameterized by three values (`OriginalCount`, `RecoveryCount`, `BlockBytes`). These are:
+ The number of blocks of original data (`OriginalCount`), which must be less than 256.
+ The number of blocks of redundant data (`RecoveryCount`), which must be no more than `256 - OriginalCount`.
+ The number of bytes per block (`BlockBytes`), which is the same for every original and recovery block.
For example, if a file is split into 3 equal pieces and sent over a network, `OriginalCount` is 3.
And if 2 additional redundant packets are generated, `RecoveryCount` is 2.
In this case up to 256 - 3 = 253 additional redundant packets can be generated.
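As a minimal sketch (the block size below is an assumption of this example, not a library default), the 3-packets-plus-2 scenario above maps onto the encoder parameters like this:
~~~
CM256::cm256_encoder_params params;
params.OriginalCount = 3; // the 3 original pieces
params.RecoveryCount = 2; // the 2 additional redundant packets
params.BlockBytes = 1024; // assumed size of each piece in bytes (application-defined)
~~~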
##### Building: Quick Setup
This is a classical cmake project. Make sure cmake and g++ are installed on your system. Create a `build` directory and cd into it. If you install the library in a custom location, say `/opt/install/cm256cc`, use the following command line for cmake:
- `cmake -Wno-dev -DCMAKE_INSTALL_PREFIX=/opt/install/cm256cc ..`
Result:
- Library will be installed as `/opt/install/cm256cc/lib/libcm256cc.so`
- Include files will be installed in `/opt/install/cm256cc/include/cm256cc`
- Binary test programs will be installed in `/opt/install/cm256cc/bin`
##### Building: Use the library
Include the cm256cc library in your project and the `cm256.h` header in your program. Have a look at the example programs `cm256_test.cpp`, `transmit.cpp` and `receive.cpp` in the `unit_test` folder for usage. Consult the `cm256.h` header for details on the encoding / decoding method.
## Compilation
This is a classical cmake project. You may install the software anywhere you like with the `-DCMAKE_INSTALL_PREFIX` definition on the cmake command line.
The cmake file will try to find the best compiler optimization options for the hardware on which you are compiling this project. This may not be suitable if you intend to distribute the software or include it in a distribution. In this case you can use the `-DENABLE_DISTRIBUTION=1` define on the command line to use just SSSE3 optimization on x86-based systems, while keeping NEON optimization for arm or arm64.
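For example, following the same command-line convention as in the Quick Setup section:
- `cmake -Wno-dev -DENABLE_DISTRIBUTION=1 -DCMAKE_INSTALL_PREFIX=/opt/install/cm256cc ..`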
## Usage
Documentation is provided in the header file [cm256.h](https://github.com/catid/cm256/raw/master/cm256.h).
When your application starts up it should call `isInitialized()` to verify that the library is constructed properly:
~~~
#include "cm256.h"
CM256 cm256;
if (!cm256.isInitialized()) {
// library not initialized
exit(1);
}
~~~
To generate redundancy, use the `cm256_encode` function. To solve for the original data, use the `cm256_decode` function.
Example usage:
~~~
bool ExampleFileUsage()
{
CM256 cm256;
if (!cm256.isInitialized()) {
// library not initialized
exit(1);
}
CM256::cm256_encoder_params params;
// Number of bytes per file block
params.BlockBytes = 4321;
// Number of blocks
params.OriginalCount = 33;
// Number of additional recovery blocks generated by encoder
params.RecoveryCount = 12;
// Size of the original file
static const int OriginalFileBytes = params.OriginalCount * params.BlockBytes;
// Allocate and fill the original file data
uint8_t* originalFileData = new uint8_t[OriginalFileBytes];
memset(originalFileData, 1, OriginalFileBytes);
// Pointers to data
CM256::cm256_block blocks[256];
for (int i = 0; i < params.OriginalCount; ++i)
{
blocks[i].Block = originalFileData + i * params.BlockBytes;
}
// Recovery data
uint8_t* recoveryBlocks = new uint8_t[params.RecoveryCount * params.BlockBytes];
// Generate recovery data
if (cm256.cm256_encode(params, blocks, recoveryBlocks))
{
exit(1);
}
// Initialize the indices
for (int i = 0; i < params.OriginalCount; ++i)
{
blocks[i].Index = CM256::cm256_get_original_block_index(params, i);
}
//// Simulate loss of data, substituting a recovery block in its place ////
blocks[0].Block = recoveryBlocks; // First recovery block
blocks[0].Index = CM256::cm256_get_recovery_block_index(params, 0); // First recovery block index
//// Simulate loss of data, substituting a recovery block in its place ////
if (cm256.cm256_decode(params, blocks))
{
exit(1);
}
// blocks[0].Index will now be 0.
delete[] originalFileData;
delete[] recoveryBlocks;
return true;
}
~~~
The example above is just one way to use the `cm256_decode` function.
This API was designed to be flexible enough for UDP/IP-based file transfer where
the blocks arrive out of order.
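For instance, a receiver can fill the `blocks` array in whatever order packets arrive. This is a minimal sketch, assuming `params` is set up as in the example above and a hypothetical application-side helper `receivePacket()` that returns a packet payload together with the block index assigned by the sender:
~~~
CM256::cm256_block blocks[256];
for (int received = 0; received < params.OriginalCount; ++received)
{
    int index;
    // receivePacket() is an assumed helper; the application must collect
    // params.OriginalCount blocks with distinct indices (original or recovery).
    void* payload = receivePacket(&index);
    blocks[received].Index = index;
    blocks[received].Block = payload;
}
if (cm256.cm256_decode(params, blocks))
{
    // decode failed
    exit(1);
}
// blocks[i].Index now identifies the original block held in blocks[i].Block.
~~~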
#### Comparisons with Other Libraries
The approach taken in CM256 is similar to the Intel Storage Acceleration Library (ISA-L) available here:
https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version/downloads
ISA-L more aggressively optimizes the matrix multiplication operation, which is the most expensive step of encoding.
CM256 takes better advantage of the m=1 case and the first recovery symbol, which is also possible with the Vandermonde matrices supported by ISA-L.
ISA-L uses a O(N^3) Gaussian elimination solver for decoding. The CM256 decoder solves the linear system using a fast O(N^2) LDU-decomposition algorithm from "Pivoting and Backward Stability of Fast Algorithms for Solving Cauchy Linear Equations" (T. Boros, T. Kailath, V. Olshevsky), which was hand-optimized for memory accesses.
#### Credits
This software was written entirely by Christopher A. Taylor and converted to clean C++ code by myself, Edouard M. Griffiths.
cm256cc-1.1.0/cm256.cpp
/*
C++ version:
Copyright (c) 2016 Edouard M. Griffiths. All rights reserved.
Copyright (c) 2015 Christopher A. Taylor. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of CM256 nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "cm256.h"
CM256::CM256()
{
m_initialized = m_gf256Ctx.isInitialized();
}
CM256::~CM256()
{
}
/*
GF(256) Cauchy Matrix Overview
As described on Wikipedia, each element of a normal Cauchy matrix is defined as:
a_ij = 1 / (x_i - y_j)
The arrays x_i and y_j are vector parameters of the matrix.
The values in x_i cannot be reused in y_j.
Moving beyond the Wikipedia...
(1) Number of rows (R) is the range of i, and number of columns (C) is the range of j.
(2) Being able to select x_i and y_j makes Cauchy matrices more flexible in practice
than Vandermonde matrices, which only have one parameter per row.
(3) Cauchy matrices are always invertible, AKA always full rank, AKA when treated as
a linear system y = M*x, the linear system has a single solution.
(4) A Cauchy matrix concatenated below a square CxC identity matrix always has rank C,
meaning that any R rows can be eliminated from the concatenated matrix and the
matrix will still be invertible. This is how Reed-Solomon erasure codes work.
(5) Any row or column can be multiplied by non-zero values, and the resulting matrix
is still full rank. This is true for any matrix, since it is effectively the same
as pre and post multiplying by diagonal matrices, which are always invertible.
(6) Matrix elements with a value of 1 are much faster to operate on than other values.
For instance a matrix of [1, 1, 1, 1, 1] is invertible and much faster for various
purposes than [2, 2, 2, 2, 2].
(7) For GF(256) matrices, the symbols in x_i and y_j are selected from the numbers
0...255, and so the number of rows + number of columns may not exceed 256.
Note that values in x_i and y_j may not be reused as stated above.
In summary, Cauchy matrices
are preferred over Vandermonde matrices. (2)
are great for MDS erasure codes. (3) and (4)
should be optimized to include more 1 elements. (5) and (6)
have a limited size in GF(256), rows+cols <= 256. (7)
*/
/*
Selected Cauchy Matrix Form
The matrix consists of elements a_ij, where i = row, j = column.
a_ij = 1 / (x_i - y_j), where x_i and y_j are sets of GF(256) values
that do not intersect.
We select x_i and y_j to just be incrementing numbers for the
purposes of this library. Further optimizations may yield matrices
with more 1 elements, but the benefit seems relatively small.
The x_i values range from 0...(originalCount - 1).
The y_j values range from originalCount...(originalCount + recoveryCount - 1).
We then improve the Cauchy matrix by dividing each column by the
first row element of that column. The result is an invertible
matrix that has all 1 elements in the first row. This is equivalent
to a rotated Vandermonde matrix, so we could have used one of those.
The advantage of doing this is that operations involving the first
row will be extremely fast (just memory XOR), so the decoder can
be optimized to take advantage of the shortcut when the first
recovery row can be used.
First row element of Cauchy matrix for each column:
a_0j = 1 / (x_0 - y_j)
Our Cauchy matrix sets first row to ones, so:
a_ij = (1 / (x_i - y_j)) / a_0j
a_ij = (y_j - x_0) / (x_i - y_j)
a_ij = (y_j + x_0) div (x_i + y_j) in GF(256)
*/
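/*
    Worked illustration (added for clarity): with OriginalCount = 2 and
    RecoveryCount = 2, the code uses y_j in {0, 1} for the original columns,
    recovery rows x_i in {2, 3}, and x_0 = 2. Row x_i = 2 is the all-ones
    parity row (a pure XOR of the originals), and row x_i = 3 has elements
    (y_j + x_0) / (x_i + y_j) = (y_j + 2) / (3 + y_j), evaluated with GF(256)
    addition (XOR) and division.
*/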
//-----------------------------------------------------------------------------
// Encoding
void CM256::cm256_encode_block(
cm256_encoder_params params, // Encoder parameters
cm256_block* originals, // Array of pointers to original blocks
int recoveryBlockIndex, // Return value from cm256_get_recovery_block_index()
void* recoveryBlock) // Output recovery block
{
// If only one block of input data,
if (params.OriginalCount == 1)
{
// No meaningful operation here, degenerate to outputting the same data each time.
memcpy(recoveryBlock, originals[0].Block, params.BlockBytes);
return;
}
// else OriginalCount >= 2:
// Unroll first row of recovery matrix:
// The matrix we generate for the first row is all ones,
// so it is merely a parity of the original data.
if (recoveryBlockIndex == params.OriginalCount)
{
gf256_ctx::gf256_addset_mem(recoveryBlock, originals[0].Block, originals[1].Block, params.BlockBytes);
for (int j = 2; j < params.OriginalCount; ++j)
{
gf256_ctx::gf256_add_mem(recoveryBlock, originals[j].Block, params.BlockBytes);
}
return;
}
// TBD: Faster algorithms seem to exist for computing this matrix-vector product.
// Start the x_0 values arbitrarily from the original count.
const uint8_t x_0 = static_cast<uint8_t>(params.OriginalCount);
// For other rows:
{
const uint8_t x_i = static_cast<uint8_t>(recoveryBlockIndex);
// Unroll first operation for speed
{
const uint8_t y_0 = 0;
const uint8_t matrixElement = m_gf256Ctx.getMatrixElement(x_i, x_0, y_0);
m_gf256Ctx.gf256_mul_mem(recoveryBlock, originals[0].Block, matrixElement, params.BlockBytes);
}
// For each original data column,
for (int j = 1; j < params.OriginalCount; ++j)
{
const uint8_t y_j = static_cast<uint8_t>(j);
const uint8_t matrixElement = m_gf256Ctx.getMatrixElement(x_i, x_0, y_j);
m_gf256Ctx.gf256_muladd_mem(recoveryBlock, matrixElement, originals[j].Block, params.BlockBytes);
}
}
}
int CM256::cm256_encode(
cm256_encoder_params params, // Encoder params
cm256_block* originals, // Array of pointers to original blocks
void* recoveryBlocks) // Output recovery blocks end-to-end
{
// Validate input:
if (params.OriginalCount <= 0 ||
params.RecoveryCount <= 0 ||
params.BlockBytes <= 0)
{
return -1;
}
if (params.OriginalCount + params.RecoveryCount > 256)
{
return -2;
}
if (!originals || !recoveryBlocks)
{
return -3;
}
uint8_t* recoveryBlock = static_cast<uint8_t*>(recoveryBlocks);
for (int block = 0; block < params.RecoveryCount; ++block, recoveryBlock += params.BlockBytes)
{
cm256_encode_block(params, originals, (params.OriginalCount + block), recoveryBlock);
}
return 0;
}
//-----------------------------------------------------------------------------
// Decoding
CM256::CM256Decoder::CM256Decoder(gf256_ctx& gf256Ctx) :
RecoveryCount(0),
OriginalCount(0),
m_gf256Ctx(gf256Ctx)
{
}
CM256::CM256Decoder::~CM256Decoder()
{
}
bool CM256::CM256Decoder::Initialize(cm256_encoder_params& params, cm256_block* blocks)
{
Params = params;
cm256_block* block = blocks;
OriginalCount = 0;
RecoveryCount = 0;
// Initialize erasures to zeros
for (int ii = 0; ii < params.OriginalCount; ++ii)
{
ErasuresIndices[ii] = 0;
}
// For each input block,
for (int ii = 0; ii < params.OriginalCount; ++ii, ++block)
{
int row = block->Index;
// If it is an original block,
if (row < params.OriginalCount)
{
Original[OriginalCount++] = block;
if (ErasuresIndices[row] != 0)
{
// Error out if two row indices repeat
return false;
}
ErasuresIndices[row] = 1;
}
else
{
Recovery[RecoveryCount++] = block;
}
}
// Identify erasures
for (int ii = 0, indexCount = 0; ii < 256; ++ii)
{
if (!ErasuresIndices[ii])
{
ErasuresIndices[indexCount] = static_cast<uint8_t>( ii );
if (++indexCount >= RecoveryCount)
{
break;
}
}
}
return true;
}
void CM256::CM256Decoder::DecodeM1()
{
// XOR all other blocks into the recovery block
uint8_t* outBlock = static_cast<uint8_t*>(Recovery[0]->Block);
const uint8_t* inBlock = nullptr;
// For each block,
for (int ii = 0; ii < OriginalCount; ++ii)
{
const uint8_t* inBlock2 = static_cast<const uint8_t*>(Original[ii]->Block);
if (!inBlock)
{
inBlock = inBlock2;
}
else
{
// outBlock ^= inBlock ^ inBlock2
gf256_ctx::gf256_add2_mem(outBlock, inBlock, inBlock2, Params.BlockBytes);
inBlock = nullptr;
}
}
// Complete XORs
if (inBlock)
{
gf256_ctx::gf256_add_mem(outBlock, inBlock, Params.BlockBytes);
}
// Recover the index it corresponds to
Recovery[0]->Index = ErasuresIndices[0];
}
// Generate the LDU decomposition of the matrix
void CM256::CM256Decoder::GenerateLDUDecomposition(uint8_t* matrix_L, uint8_t* diag_D, uint8_t* matrix_U)
{
// Schur-type-direct-Cauchy algorithm 2.5 from
// "Pivoting and Backward Stability of Fast Algorithms for Solving Cauchy Linear Equations"
// T. Boros, T. Kailath, V. Olshevsky
// Modified for practical use. I folded the diagonal parts of U/L matrices into the
// diagonal one to reduce the number of multiplications to perform against the input data,
// and organized the triangle matrices in memory to allow for faster SSE3 GF multiplications.
// Matrix size NxN
const int N = RecoveryCount;
// Generators
uint8_t g[256], b[256];
for (int i = 0; i < N; ++i)
{
g[i] = 1;
b[i] = 1;
}
// Temporary buffer for rotated row of U matrix
// This allows for faster GF bulk multiplication
uint8_t rotated_row_U[256];
uint8_t* last_U = matrix_U + ((N - 1) * N) / 2 - 1;
int firstOffset_U = 0;
// Start the x_0 values arbitrarily from the original count.
const uint8_t x_0 = static_cast<uint8_t>(Params.OriginalCount);
// Unrolling k = 0 just makes it slower for some reason.
for (int k = 0; k < N - 1; ++k)
{
const uint8_t x_k = Recovery[k]->Index;
const uint8_t y_k = ErasuresIndices[k];
// D_kk = (x_k + y_k)
// L_kk = g[k] / (x_k + y_k)
// U_kk = b[k] * (x_0 + y_k) / (x_k + y_k)
const uint8_t D_kk = gf256_ctx::gf256_add(x_k, y_k);
const uint8_t L_kk = m_gf256Ctx.gf256_div(g[k], D_kk);
const uint8_t U_kk = m_gf256Ctx.gf256_mul(m_gf256Ctx.gf256_div(b[k], D_kk), gf256_ctx::gf256_add(x_0, y_k));
// diag_D[k] = D_kk * L_kk * U_kk
diag_D[k] = m_gf256Ctx.gf256_mul(D_kk, m_gf256Ctx.gf256_mul(L_kk, U_kk));
// Computing the k-th row of L and U
uint8_t* row_L = matrix_L;
uint8_t* row_U = rotated_row_U;
for (int j = k + 1; j < N; ++j)
{
const uint8_t x_j = Recovery[j]->Index;
const uint8_t y_j = ErasuresIndices[j];
// L_jk = g[j] / (x_j + y_k)
// U_kj = b[j] / (x_k + y_j)
const uint8_t L_jk = m_gf256Ctx.gf256_div(g[j], gf256_ctx::gf256_add(x_j, y_k));
const uint8_t U_kj = m_gf256Ctx.gf256_div(b[j], gf256_ctx::gf256_add(x_k, y_j));
*matrix_L++ = L_jk;
*row_U++ = U_kj;
// g[j] = g[j] * (x_j + x_k) / (x_j + y_k)
// b[j] = b[j] * (y_j + y_k) / (y_j + x_k)
g[j] = m_gf256Ctx.gf256_mul(g[j], m_gf256Ctx.gf256_div(gf256_ctx::gf256_add(x_j, x_k), gf256_ctx::gf256_add(x_j, y_k)));
b[j] = m_gf256Ctx.gf256_mul(b[j], m_gf256Ctx.gf256_div(gf256_ctx::gf256_add(y_j, y_k), gf256_ctx::gf256_add(y_j, x_k)));
}
// Do these row/column divisions in bulk for speed.
// L_jk /= L_kk
// U_kj /= U_kk
const int count = N - (k + 1);
m_gf256Ctx.gf256_div_mem(row_L, row_L, L_kk, count);
m_gf256Ctx.gf256_div_mem(rotated_row_U, rotated_row_U, U_kk, count);
// Copy U matrix row into place in memory.
uint8_t* output_U = last_U + firstOffset_U;
row_U = rotated_row_U;
for (int j = k + 1; j < N; ++j)
{
*output_U = *row_U++;
output_U -= j;
}
firstOffset_U -= k + 2;
}
// Multiply diagonal matrix into U
uint8_t* row_U = matrix_U;
for (int j = N - 1; j > 0; --j)
{
const uint8_t y_j = ErasuresIndices[j];
const int count = j;
m_gf256Ctx.gf256_mul_mem(row_U, row_U, gf256_ctx::gf256_add(x_0, y_j), count);
row_U += count;
}
const uint8_t x_n = Recovery[N - 1]->Index;
const uint8_t y_n = ErasuresIndices[N - 1];
// D_nn = 1 / (x_n + y_n)
// L_nn = g[N-1]
// U_nn = b[N-1] * (x_0 + y_n)
const uint8_t L_nn = g[N - 1];
const uint8_t U_nn = m_gf256Ctx.gf256_mul(b[N - 1], gf256_ctx::gf256_add(x_0, y_n));
// diag_D[N-1] = L_nn * D_nn * U_nn
diag_D[N - 1] = m_gf256Ctx.gf256_div(m_gf256Ctx.gf256_mul(L_nn, U_nn), gf256_ctx::gf256_add(x_n, y_n));
}
void CM256::CM256Decoder::Decode()
{
// Matrix size is NxN, where N is the number of recovery blocks used.
const int N = RecoveryCount;
// Start the x_0 values arbitrarily from the original count.
const uint8_t x_0 = static_cast<uint8_t>(Params.OriginalCount);
// Eliminate original data from the recovery rows
for (int originalIndex = 0; originalIndex < OriginalCount; ++originalIndex)
{
const uint8_t* inBlock = static_cast<const uint8_t*>(Original[originalIndex]->Block);
const uint8_t inRow = Original[originalIndex]->Index;
for (int recoveryIndex = 0; recoveryIndex < N; ++recoveryIndex)
{
uint8_t* outBlock = static_cast<uint8_t*>(Recovery[recoveryIndex]->Block);
const uint8_t x_i = Recovery[recoveryIndex]->Index;
const uint8_t y_j = inRow;
const uint8_t matrixElement = m_gf256Ctx.getMatrixElement(x_i, x_0, y_j);
m_gf256Ctx.gf256_muladd_mem(outBlock, matrixElement, inBlock, Params.BlockBytes);
}
}
// Allocate matrix
static const int StackAllocSize = 2048;
uint8_t stackMatrix[StackAllocSize];
uint8_t* dynamicMatrix = nullptr;
uint8_t* matrix = stackMatrix;
const int requiredSpace = N * N;
if (requiredSpace > StackAllocSize)
{
dynamicMatrix = new uint8_t[requiredSpace];
matrix = dynamicMatrix;
}
/*
Compute matrix decomposition:
G = L * D * U
L is lower-triangular, diagonal is all ones.
D is a diagonal matrix.
U is upper-triangular, diagonal is all ones.
*/
uint8_t* matrix_U = matrix;
uint8_t* diag_D = matrix_U + (N - 1) * N / 2;
uint8_t* matrix_L = diag_D + N;
GenerateLDUDecomposition(matrix_L, diag_D, matrix_U);
/*
Eliminate lower left triangle.
*/
// For each column,
for (int j = 0; j < N - 1; ++j)
{
const void* block_j = Recovery[j]->Block;
// For each row,
for (int i = j + 1; i < N; ++i)
{
void* block_i = Recovery[i]->Block;
const uint8_t c_ij = *matrix_L++; // Matrix elements are stored column-first, top-down.
m_gf256Ctx.gf256_muladd_mem(block_i, c_ij, block_j, Params.BlockBytes);
}
}
/*
Eliminate diagonal.
*/
for (int i = 0; i < N; ++i)
{
void* block = Recovery[i]->Block;
Recovery[i]->Index = ErasuresIndices[i];
m_gf256Ctx.gf256_div_mem(block, block, diag_D[i], Params.BlockBytes);
}
/*
Eliminate upper right triangle.
*/
for (int j = N - 1; j >= 1; --j)
{
const void* block_j = Recovery[j]->Block;
for (int i = j - 1; i >= 0; --i)
{
void* block_i = Recovery[i]->Block;
const uint8_t c_ij = *matrix_U++; // Matrix elements are stored column-first, bottom-up.
m_gf256Ctx.gf256_muladd_mem(block_i, c_ij, block_j, Params.BlockBytes);
}
}
delete[] dynamicMatrix;
}
int CM256::cm256_decode(
cm256_encoder_params params, // Encoder params
cm256_block* blocks) // Array of 'originalCount' blocks as described above
{
if (params.OriginalCount <= 0 ||
params.RecoveryCount <= 0 ||
params.BlockBytes <= 0)
{
return -1;
}
if (params.OriginalCount + params.RecoveryCount > 256)
{
return -2;
}
if (!blocks)
{
return -3;
}
// If there is only one block,
if (params.OriginalCount == 1)
{
// It is the same block repeated
blocks[0].Index = 0;
return 0;
}
CM256Decoder state(m_gf256Ctx);
if (!state.Initialize(params, blocks))
{
return -5;
}
// If nothing is erased,
if (state.RecoveryCount <= 0)
{
return 0;
}
// If m=1,
if (params.RecoveryCount == 1)
{
state.DecodeM1();
return 0;
}
// Decode for m>1
state.Decode();
return 0;
}
cm256cc-1.1.0/cm256.h 0000664 0000000 0000000 00000016325 14010511322 0013654 0 ustar 00root root 0000000 0000000 /*
C++ version:
Copyright (c) 2016 Edouard M. Griffiths. All rights reserved.
Copyright (c) 2015 Christopher A. Taylor. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of CM256 nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CM256_H
#define CM256_H
#include <assert.h>
#include "gf256.h"
#include "export.h"
class CM256CC_API CM256
{
public:
// Encoder parameters
typedef struct cm256_encoder_params_t {
// Original block count < 256
int OriginalCount;
// Recovery block count < 256
int RecoveryCount;
// Number of bytes per block (all blocks are the same size in bytes)
int BlockBytes;
} cm256_encoder_params;
// Descriptor for data block
typedef struct cm256_block_t {
// Pointer to data received.
void* Block;
// Block index.
// For original data, it will be in the range
// [0..(originalCount-1)] inclusive.
// For recovery data, the first one's Index must be originalCount,
// and it will be in the range
// [originalCount..(originalCount+recoveryCount-1)] inclusive.
unsigned char Index;
// Ignored during encoding, required during decoding.
} cm256_block;
CM256();
~CM256();
bool isInitialized() const { return m_initialized; };
/*
* Cauchy MDS GF(256) encode
*
* This produces a set of recovery blocks that should be transmitted after the
* original data blocks.
*
* It takes in 'originalCount' equal-sized blocks and produces 'recoveryCount'
* equally-sized recovery blocks.
*
* The input 'originals' array allows more natural usage of the library.
* The output recovery blocks are stored end-to-end in 'recoveryBlocks'.
* 'recoveryBlocks' should have recoveryCount * blockBytes bytes available.
*
* Precondition: originalCount + recoveryCount <= 256
*
* When transmitting the data, the block index of the data should be sent,
* and the recovery block index is also needed. The decoder should also
* be provided with the values of originalCount, recoveryCount and blockBytes.
*
* Example wire format:
* [originalCount(1 byte)] [recoveryCount(1 byte)]
* [blockIndex(1 byte)] [blockData(blockBytes bytes)]
*
* Be careful not to mix blocks from different encoders.
*
* It is possible to support variable-length data by including the original
* data length at the front of each message in 2 bytes, such that when it is
* recovered after a loss the data length is available in the block data and
* the remaining bytes of padding can be neglected.
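*
* For example (an illustrative framing, not a format mandated by the library):
* a 700-byte message carried in 1024-byte blocks could be sent as the two
* length bytes 0xBC 0x02 (700 in little-endian), followed by the 700 data
* bytes and 322 bytes of padding; after recovery the receiver reads the
* length and discards the padding.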
*
* Returns 0 on success, and any other code indicates failure.
*/
int cm256_encode(
cm256_encoder_params params, // Encoder parameters
cm256_block* originals, // Array of pointers to original blocks
void* recoveryBlocks); // Output recovery blocks end-to-end
/*
* Cauchy MDS GF(256) decode
*
* This recovers the original data from the recovery data in the provided
* blocks. There should be 'originalCount' blocks in the provided array.
* Recovery will always be possible if that many blocks are received.
*
* Provide the same values for 'originalCount', 'recoveryCount', and
* 'blockBytes' used by the encoder.
*
* The block Index should be set to the block index of the original data,
* as described in the cm256_block struct comments above.
*
* Recovery blocks will be replaced with original data and the Index
* will be updated to indicate the original block that was recovered.
*
* Returns 0 on success, and any other code indicates failure.
*/
int cm256_decode(
cm256_encoder_params params, // Encoder parameters
cm256_block* blocks); // Array of 'originalCount' blocks as described above
/*
* Commodity functions
*/
// Compute the value to put in the Index member of cm256_block
static inline unsigned char cm256_get_recovery_block_index(cm256_encoder_params params, int recoveryBlockIndex)
{
assert(recoveryBlockIndex >= 0 && recoveryBlockIndex < params.RecoveryCount);
return (unsigned char)(params.OriginalCount + recoveryBlockIndex);
}
static inline unsigned char cm256_get_original_block_index(cm256_encoder_params params, int originalBlockIndex)
{
(void) params;
assert(originalBlockIndex >= 0 && originalBlockIndex < params.OriginalCount);
return (unsigned char)(originalBlockIndex);
}
private:
class CM256CC_API CM256Decoder
{
public:
CM256Decoder(gf256_ctx& gf256Ctx);
~CM256Decoder();
// Encode parameters
cm256_encoder_params Params;
// Recovery blocks
cm256_block* Recovery[256];
int RecoveryCount;
// Original blocks
cm256_block* Original[256];
int OriginalCount;
// Row indices that were erased
uint8_t ErasuresIndices[256];
// Initialize the decoder
bool Initialize(cm256_encoder_params& params, cm256_block* blocks);
// Decode m=1 case
void DecodeM1();
// Decode for m>1 case
void Decode();
// Generate the LDU decomposition of the matrix
void GenerateLDUDecomposition(uint8_t* matrix_L, uint8_t* diag_D, uint8_t* matrix_U);
private:
gf256_ctx& m_gf256Ctx;
};
// Encode one block.
// Note: This function does not validate input, use with care.
void cm256_encode_block(
cm256_encoder_params params, // Encoder parameters
cm256_block* originals, // Array of pointers to original blocks
int recoveryBlockIndex, // Return value from cm256_get_recovery_block_index()
void* recoveryBlock); // Output recovery block
gf256_ctx m_gf256Ctx;
bool m_initialized;
};
#endif // CM256_H
cm256cc-1.1.0/cmake/ 0000775 0000000 0000000 00000000000 14010511322 0013720 5 ustar 00root root 0000000 0000000 cm256cc-1.1.0/cmake/test/ 0000775 0000000 0000000 00000000000 14010511322 0014677 5 ustar 00root root 0000000 0000000 cm256cc-1.1.0/cmake/test/test_arm_neon.cxx 0000664 0000000 0000000 00000000420 14010511322 0020254 0 ustar 00root root 0000000 0000000 #include
#include <arm_neon.h>
#include <signal.h>
#include <stdlib.h>
void signalHandler(int signum) {
exit(signum); // SIGILL = 4
}
int main(int argc, char* argv[])
{
signal(SIGILL, signalHandler);
uint32x4_t x={0};
x=veorq_u32(x,x);
return 0;
}
cm256cc-1.1.0/cmake/test/test_x86_avx.cxx 0000664 0000000 0000000 00000000423 14010511322 0017764 0 ustar 00root root 0000000 0000000 #include
#include <signal.h>
#include <stdlib.h>
void signalHandler(int signum) {
exit(signum); // SIGILL = 4
}
int main(int argc, char* argv[])
{
signal(SIGILL, signalHandler);
__m256d x = _mm256_setzero_pd();
x=_mm256_addsub_pd(x,x);
return 0;
}
cm256cc-1.1.0/cmake/test/test_x86_avx2.cxx 0000664 0000000 0000000 00000000427 14010511322 0020052 0 ustar 00root root 0000000 0000000 #include
#include <signal.h>
#include <stdlib.h>
void signalHandler(int signum) {
exit(signum); // SIGILL = 4
}
int main(int argc, char* argv[])
{
signal(SIGILL, signalHandler);
__m256i x = _mm256_setzero_si256();
x=_mm256_add_epi64 (x,x);
return 0;
}
cm256cc-1.1.0/cmake/test/test_x86_avx512.cxx 0000664 0000000 0000000 00000000470 14010511322 0020216 0 ustar 00root root 0000000 0000000 #include
#include <immintrin.h>
#include <signal.h>
#include <stdlib.h>
void signalHandler(int signum) {
exit(signum); // SIGILL = 4
}
int main(int argc, char* argv[])
{
signal(SIGILL, signalHandler);
uint64_t x[8] = {0};
__m512i y = _mm512_loadu_si512((__m512i*)x);
return 0;
}
cm256cc-1.1.0/cmake/test/test_x86_sse2.cxx 0000664 0000000 0000000 00000000420 14010511322 0020037 0 ustar 00root root 0000000 0000000 #include
#include <signal.h>
#include <stdlib.h>
void signalHandler(int signum) {
exit(signum); // SIGILL = 4
}
int main(int argc, char* argv[])
{
signal(SIGILL, signalHandler);
__m128i x = _mm_setzero_si128();
x=_mm_add_epi64(x,x);
return 0;
}
cm256cc-1.1.0/cmake/test/test_x86_sse3.cxx 0000664 0000000 0000000 00000000444 14010511322 0020046 0 ustar 00root root 0000000 0000000 #include
#include <pmmintrin.h>
#include <signal.h>
#include <stdlib.h>
void signalHandler(int signum) {
exit(signum); // SIGILL = 4
}
int main(int argc, char* argv[])
{
signal(SIGILL, signalHandler);
__m128d x = _mm_setzero_pd();
x=_mm_addsub_pd(x,x);
return 0;
}
cm256cc-1.1.0/cmake/test/test_x86_sse41.cxx 0000664 0000000 0000000 00000000557 14010511322 0020135 0 ustar 00root root 0000000 0000000 #include
#include <smmintrin.h>
#include <signal.h>
#include <stdlib.h>
void signalHandler(int signum) {
exit(signum); // SIGILL = 4
}
int main(int argc, char* argv[])
{
signal(SIGILL, signalHandler);
__m128i x = _mm_setzero_si128();
__m128i a = _mm_setzero_si128();
__m128i b = _mm_setzero_si128();
x=_mm_blend_epi16(a,b,4);
return 0;
}
cm256cc-1.1.0/cmake/test/test_x86_sse42.cxx 0000664 0000000 0000000 00000000401 14010511322 0020122 0 ustar 00root root 0000000 0000000 #include
#include <signal.h>
#include <stdlib.h>
void signalHandler(int signum) {
exit(signum); // SIGILL = 4
}
int main(int argc, char* argv[])
{
signal(SIGILL, signalHandler);
unsigned int x=32;
x=_mm_crc32_u8(x,4);
return 0;
}
cm256cc-1.1.0/cmake/test/test_x86_ssse3.cxx 0000664 0000000 0000000 00000000453 14010511322 0020231 0 ustar 00root root 0000000 0000000 #include
#include <tmmintrin.h>
#include <signal.h>
#include <stdlib.h>
void signalHandler(int signum) {
exit(signum); // SIGILL = 4
}
int main(int argc, char* argv[])
{
signal(SIGILL, signalHandler);
__m128i x = _mm_setzero_si128();
x=_mm_alignr_epi8(x,x,2);
return 0;
}
cm256cc-1.1.0/export.h 0000664 0000000 0000000 00000003717 14010511322 0014342 0 ustar 00root root 0000000 0000000 ///////////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2018 Edouard Griffiths, F4EXB. //
// //
// This program is free software; you can redistribute it and/or modify //
// it under the terms of the GNU General Public License as published by //
// the Free Software Foundation as version 3 of the License, or //
// (at your option) any later version. //
// //
// This program is distributed in the hope that it will be useful, //
// but WITHOUT ANY WARRANTY; without even the implied warranty of //
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //
// GNU General Public License V3 for more details. //
// //
// You should have received a copy of the GNU General Public License //
// along with this program. If not, see . //
///////////////////////////////////////////////////////////////////////////////////
#ifndef __CM256CC_EXPORT_H
#define __CM256CC_EXPORT_H
#if defined (__GNUC__) && (__GNUC__ >= 4)
# define __CM256CC_EXPORT __attribute__((visibility("default")))
# define __CM256CC_IMPORT __attribute__((visibility("default")))
#elif defined (_MSC_VER)
# define __CM256CC_EXPORT __declspec(dllexport)
# define __CM256CC_IMPORT __declspec(dllimport)
#else
# define __CM256CC_EXPORT
# define __CM256CC_IMPORT
#endif
/* The 'CM256CC_API' controls the import/export of 'cm256cc' symbols and classes.
*/
#if !defined(cm256cc_STATIC)
# if defined cm256cc_EXPORTS
# define CM256CC_API __CM256CC_EXPORT
# else
# define CM256CC_API __CM256CC_IMPORT
# endif
#else
# define CM256CC_API
#endif
#endif // __CM256CC_EXPORT_H cm256cc-1.1.0/gf256.cpp 0000664 0000000 0000000 00000056400 14010511322 0014202 0 ustar 00root root 0000000 0000000 /*
Copyright (c) 2015 Christopher A. Taylor. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of CM256 nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <stdio.h>
#include "gf256.h"
const uint8_t gf256_ctx::GF256_GEN_POLY[GF256_GEN_POLY_COUNT] = {
0x8e, 0x95, 0x96, 0xa6, 0xaf, 0xb1, 0xb2, 0xb4,
0xb8, 0xc3, 0xc6, 0xd4, 0xe1, 0xe7, 0xf3, 0xfa,
};
gf256_ctx::gf256_ctx() :
initialized(false)
{
gf256_init_();
}
gf256_ctx::~gf256_ctx()
{
}
// Select which polynomial to use
void gf256_ctx::gf255_poly_init(int polynomialIndex)
{
if (polynomialIndex < 0 || polynomialIndex >= GF256_GEN_POLY_COUNT)
{
polynomialIndex = 0;
}
Polynomial = (GF256_GEN_POLY[polynomialIndex] << 1) | 1;
}
//-----------------------------------------------------------------------------
// Exponential and Log Tables
// Construct EXP and LOG tables from polynomial
void gf256_ctx::gf256_explog_init()
{
unsigned poly = Polynomial;
uint8_t* exptab = GF256_EXP_TABLE;
uint16_t* logtab = GF256_LOG_TABLE;
logtab[0] = 512;
exptab[0] = 1;
for (unsigned jj = 1; jj < 255; ++jj)
{
unsigned next = (unsigned)exptab[jj - 1] * 2;
if (next >= 256) next ^= poly;
exptab[jj] = static_cast<uint8_t>( next );
logtab[exptab[jj]] = static_cast<uint16_t>( jj );
}
exptab[255] = exptab[0];
logtab[exptab[255]] = 255;
for (unsigned jj = 256; jj < 2 * 255; ++jj)
{
exptab[jj] = exptab[jj % 255];
}
exptab[2 * 255] = 1;
for (unsigned jj = 2 * 255 + 1; jj < 4 * 255; ++jj)
{
exptab[jj] = 0;
}
}
//-----------------------------------------------------------------------------
// Multiply and Divide Tables
// Initialize MUL and DIV tables using LOG and EXP tables
void gf256_ctx::gf256_muldiv_init()
{
// Allocate table memory 65KB x 2
uint8_t* m = GF256_MUL_TABLE;
uint8_t* d = GF256_DIV_TABLE;
// Unroll y = 0 subtable
for (int x = 0; x < 256; ++x)
{
m[x] = d[x] = 0;
}
// For each other y value,
for (int y = 1; y < 256; ++y)
{
// Calculate log(y) for mult and 255 - log(y) for div
const uint8_t log_y = static_cast<uint8_t>(GF256_LOG_TABLE[y]);
const uint8_t log_yn = 255 - log_y;
// Next subtable
m += 256;
d += 256;
// Unroll x = 0
m[0] = 0;
d[0] = 0;
// Calculate x * y, x / y
for (int x = 1; x < 256; ++x)
{
uint16_t log_x = GF256_LOG_TABLE[x];
m[x] = GF256_EXP_TABLE[log_x + log_y];
d[x] = GF256_EXP_TABLE[log_x + log_yn];
}
}
}
//-----------------------------------------------------------------------------
// Inverse Table
// Initialize INV table using DIV table
void gf256_ctx::gf256_inv_init()
{
for (int x = 0; x < 256; ++x)
{
GF256_INV_TABLE[x] = gf256_div(1, static_cast(x));
}
}
//-----------------------------------------------------------------------------
// Multiply and Add Memory Tables
/*
Fast algorithm to compute m[1..8] = a[1..8] * b in GF(256)
using SSE3 SIMD instruction set:
Consider z = x * y in GF(256).
This operation can be performed bit-by-bit. Usefully, the partial product
of each bit is combined linearly with the rest. This means that the 8-bit
number x can be split into its high and low 4 bits, and partial products
can be formed from each half. Then the halves can be linearly combined:
z = x[0..3] * y + x[4..7] * y
The multiplication of each half can be done efficiently via table lookups,
and the addition in GF(256) is XOR. There must be two tables that map 16
input elements for the low or high 4 bits of x to the two partial products.
Each value for y has a different set of two tables:
z = TABLE_LO_y(x[0..3]) xor TABLE_HI_y(x[4..7])
This means that we need 16 * 2 * 256 = 8192 bytes for precomputed tables.
Computing z[] = x[] * y can be performed 16 bytes at a time by using the
128-bit register operations supported by modern processors.
This is efficiently realized in SSE3 using the _mm_shuffle_epi8() function
provided by Visual Studio 2010 or newer in <tmmintrin.h>. This function
uses the low bits to do a table lookup on each byte. Unfortunately the
high bit of each mask byte has the special feature that it clears the
output byte when it is set, so we need to make sure it's cleared by masking
off the high bit of each byte before using it:
clr_mask = _mm_set1_epi8(0x0f) = 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f
For the low half of the partial product, clear the high bit of each byte
and perform the table lookup:
p_lo = _mm_and_si128(x, clr_mask)
p_lo = _mm_shuffle_epi8(p_lo, TABLE_LO_y)
For the high half of the partial product, shift the high 4 bits of each
byte into the low 4 bits and clear the high bit of each byte, and then
perform the table lookup:
p_hi = _mm_srli_epi64(x, 4)
p_hi = _mm_and_si128(p_hi, clr_mask)
p_hi = _mm_shuffle_epi8(p_hi, TABLE_HI_y)
Finally add the two partial products to form the product, recalling that
addition is XOR in a Galois field:
result = _mm_xor_si128(p_lo, p_hi)
This crunches 16 bytes of x at a time, and the result can be stored in z.
*/
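/*
    For reference (added illustration): the scalar equivalent of the SIMD
    kernel below is a single table lookup per byte,

        z[i] = GF256_MUL_TABLE[((unsigned)y << 8) + x[i]];

    which is exactly the fallback gf256_mul_mem() uses for tail bytes that
    do not fill a whole 16-byte register.
*/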
/*
Intrinsic reference:
SSE3, VS2010+, tmmintrin.h:
GF256_M128 _mm_shuffle_epi8(GF256_M128 a, GF256_M128 mask);
Emits the Supplemental Streaming SIMD Extensions 3 (SSSE3) instruction pshufb. This instruction shuffles 16-byte parameters from a 128-bit parameter.
Pseudo-code for PSHUFB (with 128 bit operands):
for i = 0 to 15 {
if (SRC[(i * 8)+7] = 1 ) then
DEST[(i*8)+7..(i*8)+0] <- 0;
else
index[3..0] <- SRC[(i*8)+3 .. (i*8)+0];
DEST[(i*8)+7..(i*8)+0] <- DEST[(index*8+7)..(index*8+0)];
endif
}
SSE2, VS2008+, emmintrin.h:
GF256_M128 _mm_slli_epi64 (GF256_M128 a, int count);
Shifts the 2 signed or unsigned 64-bit integers in a left by count bits while shifting in zeros.
GF256_M128 _mm_srli_epi64 (GF256_M128 a, int count);
Shifts the 2 signed or unsigned 64-bit integers in a right by count bits while shifting in zeros.
GF256_M128 _mm_set1_epi8 (char b);
Sets the 16 signed 8-bit integer values to b.
GF256_M128 _mm_and_si128 (GF256_M128 a, GF256_M128 b);
Computes the bitwise AND of the 128-bit value in a and the 128-bit value in b.
GF256_M128 _mm_xor_si128 ( GF256_M128 a, GF256_M128 b);
Computes the bitwise XOR of the 128-bit value in a and the 128-bit value in b.
*/
// Initialize the MM256 tables using gf256_mul()
void gf256_ctx::gf256_muladd_mem_init()
{
for (int y = 0; y < 256; ++y)
{
uint8_t lo[16], hi[16];
// TABLE_LO_Y maps 0..15 to 8-bit partial product based on y.
for (unsigned char x = 0; x < 16; ++x)
{
lo[x] = gf256_mul(x, static_cast<uint8_t>( y ));
hi[x] = gf256_mul(x << 4, static_cast<uint8_t>( y ));
}
const GF256_M128 table_lo = _mm_set_epi8(
lo[15], lo[14], lo[13], lo[12], lo[11], lo[10], lo[9], lo[8],
lo[7], lo[6], lo[5], lo[4], lo[3], lo[2], lo[1], lo[0]);
const GF256_M128 table_hi = _mm_set_epi8(
hi[15], hi[14], hi[13], hi[12], hi[11], hi[10], hi[9], hi[8],
hi[7], hi[6], hi[5], hi[4], hi[3], hi[2], hi[1], hi[0]);
_mm_store_si128(MM256_TABLE_LO_Y + y, table_lo);
_mm_store_si128(MM256_TABLE_HI_Y + y, table_hi);
}
}
//-----------------------------------------------------------------------------
// Initialization
//
// Initialize a context, filling in the tables.
//
// Thread-safety / Usage Notes:
//
// It is perfectly safe and encouraged to use a gf256_ctx object from multiple
// threads. The gf256_init() is relatively expensive and should only be done
// once, though it will take less than a millisecond.
//
// The gf256_ctx object must be aligned to a 16-byte boundary.
// Simply tag the object with GF256_ALIGNED to achieve this.
//
// Example:
// static GF256_ALIGNED gf256_ctx TheGF256Context;
// gf256_init(&TheGF256Context, 0);
//
// Returns 0 on success and other values on failure.
int gf256_ctx::gf256_init_()
{
// Avoid multiple initialization
if (initialized)
{
return 0;
}
if (!IsLittleEndian())
{
fprintf(stderr, "gf256_ctx::gf256_init_: Little Endian architecture expected (code won't work without mods)\n");
return -2;
}
gf255_poly_init(DefaultPolynomialIndex);
gf256_explog_init();
gf256_muldiv_init();
gf256_inv_init();
gf256_muladd_mem_init();
initialized = true;
fprintf(stderr, "gf256_ctx::gf256_init_: initialized\n");
return 0;
}
//-----------------------------------------------------------------------------
// Operations with context
void gf256_ctx::gf256_mul_mem(void * GF256_RESTRICT vz, const void * GF256_RESTRICT vx, uint8_t y, int bytes)
{
// Use a single if-statement to handle special cases
if (y <= 1)
{
if (y == 0)
{
memset(vz, 0, bytes);
}
return;
}
// Partial product tables; see above
const GF256_M128 table_lo_y = _mm_load_si128(MM256_TABLE_LO_Y + y);
const GF256_M128 table_hi_y = _mm_load_si128(MM256_TABLE_HI_Y + y);
// clr_mask = 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f
const GF256_M128 clr_mask = _mm_set1_epi8(0x0f);
GF256_M128 * GF256_RESTRICT z16 = reinterpret_cast<GF256_M128 *>(vz);
const GF256_M128 * GF256_RESTRICT x16 = reinterpret_cast<const GF256_M128 *>(vx);
// Handle multiples of 16 bytes
while (bytes >= 16)
{
// See above comments for details
GF256_M128 x0 = _mm_loadu_si128(x16);
GF256_M128 l0 = _mm_and_si128(x0, clr_mask);
x0 = _mm_srli_epi64(x0, 4);
GF256_M128 h0 = _mm_and_si128(x0, clr_mask);
l0 = _mm_shuffle_epi8(table_lo_y, l0);
h0 = _mm_shuffle_epi8(table_hi_y, h0);
_mm_storeu_si128(z16, _mm_xor_si128(l0, h0));
x16++;
z16++;
bytes -= 16;
}
uint8_t * GF256_RESTRICT z8 = reinterpret_cast<uint8_t *>(z16);
const uint8_t * GF256_RESTRICT x8 = reinterpret_cast<const uint8_t *>(x16);
const uint8_t * GF256_RESTRICT table = GF256_MUL_TABLE + ((unsigned)y << 8);
// Handle a block of 8 bytes
if (bytes >= 8)
{
uint64_t word = table[x8[0]];
word |= (uint64_t)table[x8[1]] << 8;
word |= (uint64_t)table[x8[2]] << 16;
word |= (uint64_t)table[x8[3]] << 24;
word |= (uint64_t)table[x8[4]] << 32;
word |= (uint64_t)table[x8[5]] << 40;
word |= (uint64_t)table[x8[6]] << 48;
word |= (uint64_t)table[x8[7]] << 56;
*(uint64_t*)z8 = word;
x8 += 8;
z8 += 8;
bytes -= 8;
}
// Handle a block of 4 bytes
if (bytes >= 4)
{
uint32_t word = table[x8[0]];
word |= (uint32_t)table[x8[1]] << 8;
word |= (uint32_t)table[x8[2]] << 16;
word |= (uint32_t)table[x8[3]] << 24;
*(uint32_t*)z8 = word;
x8 += 4;
z8 += 4;
bytes -= 4;
}
// Handle single bytes
for (int i = bytes; i > 0; i--) {
z8[i-1] = table[x8[i-1]];
}
}
void gf256_ctx::gf256_muladd_mem(void * GF256_RESTRICT vz, uint8_t y, const void * GF256_RESTRICT vx, int bytes)
{
// Use a single if-statement to handle special cases
if (y <= 1)
{
if (y == 1)
{
gf256_add_mem(vz, vx, bytes);
}
return;
}
// Partial product tables; see above
const GF256_M128 table_lo_y = _mm_load_si128(MM256_TABLE_LO_Y + y);
const GF256_M128 table_hi_y = _mm_load_si128(MM256_TABLE_HI_Y + y);
// clr_mask = 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f
const GF256_M128 clr_mask = _mm_set1_epi8(0x0f);
GF256_M128 * GF256_RESTRICT z16 = reinterpret_cast<GF256_M128 *>(vz);
const GF256_M128 * GF256_RESTRICT x16 = reinterpret_cast<const GF256_M128 *>(vx);
// Handle multiples of 16 bytes
while (bytes >= 16)
{
// See above comments for details
GF256_M128 x0 = _mm_loadu_si128(x16);
GF256_M128 l0 = _mm_and_si128(x0, clr_mask);
x0 = _mm_srli_epi64(x0, 4);
GF256_M128 h0 = _mm_and_si128(x0, clr_mask);
l0 = _mm_shuffle_epi8(table_lo_y, l0);
h0 = _mm_shuffle_epi8(table_hi_y, h0);
const GF256_M128 p0 = _mm_xor_si128(l0, h0);
const GF256_M128 z0 = _mm_loadu_si128(z16);
_mm_storeu_si128(z16, _mm_xor_si128(p0, z0));
x16++;
z16++;
bytes -= 16;
}
uint8_t * GF256_RESTRICT z8 = reinterpret_cast<uint8_t *>(z16);
const uint8_t * GF256_RESTRICT x8 = reinterpret_cast<const uint8_t *>(x16);
const uint8_t * GF256_RESTRICT table = GF256_MUL_TABLE + ((unsigned)y << 8);
// Handle a block of 8 bytes
if (bytes >= 8)
{
uint64_t word = table[x8[0]];
word |= (uint64_t)table[x8[1]] << 8;
word |= (uint64_t)table[x8[2]] << 16;
word |= (uint64_t)table[x8[3]] << 24;
word |= (uint64_t)table[x8[4]] << 32;
word |= (uint64_t)table[x8[5]] << 40;
word |= (uint64_t)table[x8[6]] << 48;
word |= (uint64_t)table[x8[7]] << 56;
*(uint64_t*)z8 ^= word;
x8 += 8;
z8 += 8;
bytes -= 8;
}
// Handle a block of 4 bytes
if (bytes >= 4)
{
uint32_t word = table[x8[0]];
word |= (uint32_t)table[x8[1]] << 8;
word |= (uint32_t)table[x8[2]] << 16;
word |= (uint32_t)table[x8[3]] << 24;
*(uint32_t*)z8 ^= word;
x8 += 4;
z8 += 4;
bytes -= 4;
}
// Handle single bytes
for (int i = bytes; i > 0; i--) {
z8[i-1] ^= table[x8[i-1]];
}
}
//-----------------------------------------------------------------------------
// Static operations
void gf256_ctx::gf256_add_mem(void * GF256_RESTRICT vx, const void * GF256_RESTRICT vy, int bytes)
{
GF256_M128 * GF256_RESTRICT x16 = reinterpret_cast<GF256_M128 *>(vx);
const GF256_M128 * GF256_RESTRICT y16 = reinterpret_cast<const GF256_M128 *>(vy);
// Handle multiples of 64 bytes
while (bytes >= 64)
{
GF256_M128 x0 = _mm_loadu_si128(x16);
GF256_M128 x1 = _mm_loadu_si128(x16 + 1);
GF256_M128 x2 = _mm_loadu_si128(x16 + 2);
GF256_M128 x3 = _mm_loadu_si128(x16 + 3);
GF256_M128 y0 = _mm_loadu_si128(y16);
GF256_M128 y1 = _mm_loadu_si128(y16 + 1);
GF256_M128 y2 = _mm_loadu_si128(y16 + 2);
GF256_M128 y3 = _mm_loadu_si128(y16 + 3);
_mm_storeu_si128(x16,
_mm_xor_si128(x0, y0));
_mm_storeu_si128(x16 + 1,
_mm_xor_si128(x1, y1));
_mm_storeu_si128(x16 + 2,
_mm_xor_si128(x2, y2));
_mm_storeu_si128(x16 + 3,
_mm_xor_si128(x3, y3));
x16 += 4;
y16 += 4;
bytes -= 64;
}
// Handle multiples of 16 bytes
while (bytes >= 16)
{
// x[i] = x[i] xor y[i]
_mm_storeu_si128(x16,
_mm_xor_si128(
_mm_loadu_si128(x16),
_mm_loadu_si128(y16)));
x16++;
y16++;
bytes -= 16;
}
uint8_t * GF256_RESTRICT x1 = reinterpret_cast<uint8_t *>(x16);
const uint8_t * GF256_RESTRICT y1 = reinterpret_cast<const uint8_t *>(y16);
// Handle a block of 8 bytes
if (bytes >= 8)
{
uint64_t * GF256_RESTRICT x8 = reinterpret_cast<uint64_t *>(x1);
const uint64_t * GF256_RESTRICT y8 = reinterpret_cast<const uint64_t *>(y1);
*x8 ^= *y8;
x1 += 8;
y1 += 8;
bytes -= 8;
}
// Handle a block of 4 bytes
if (bytes >= 4)
{
uint32_t * GF256_RESTRICT x4 = reinterpret_cast<uint32_t *>(x1);
const uint32_t * GF256_RESTRICT y4 = reinterpret_cast<const uint32_t *>(y1);
*x4 ^= *y4;
x1 += 4;
y1 += 4;
bytes -= 4;
}
// Handle final bytes
for (int i = bytes; i > 0; i--) {
x1[i-1] ^= y1[i-1];
}
}
void gf256_ctx::gf256_add2_mem(void * GF256_RESTRICT vz, const void * GF256_RESTRICT vx, const void * GF256_RESTRICT vy, int bytes)
{
GF256_M128 * GF256_RESTRICT z16 = reinterpret_cast<GF256_M128 *>(vz);
const GF256_M128 * GF256_RESTRICT x16 = reinterpret_cast<const GF256_M128 *>(vx);
const GF256_M128 * GF256_RESTRICT y16 = reinterpret_cast<const GF256_M128 *>(vy);
// Handle multiples of 16 bytes
while (bytes >= 16)
{
// z[i] = x[i] xor y[i]
_mm_storeu_si128(z16,
_mm_xor_si128(
_mm_loadu_si128(z16),
_mm_xor_si128(
_mm_loadu_si128(x16),
_mm_loadu_si128(y16))));
x16++;
y16++;
z16++;
bytes -= 16;
}
uint8_t * GF256_RESTRICT z1 = reinterpret_cast<uint8_t *>(z16);
const uint8_t * GF256_RESTRICT x1 = reinterpret_cast<const uint8_t *>(x16);
const uint8_t * GF256_RESTRICT y1 = reinterpret_cast<const uint8_t *>(y16);
// Handle a block of 8 bytes
if (bytes >= 8)
{
uint64_t * GF256_RESTRICT z8 = reinterpret_cast<uint64_t *>(z1);
const uint64_t * GF256_RESTRICT x8 = reinterpret_cast<const uint64_t *>(x1);
const uint64_t * GF256_RESTRICT y8 = reinterpret_cast<const uint64_t *>(y1);
*z8 ^= *x8 ^ *y8;
x1 += 8;
y1 += 8;
z1 += 8;
bytes -= 8;
}
// Handle a block of 4 bytes
if (bytes >= 4)
{
uint32_t * GF256_RESTRICT z4 = reinterpret_cast<uint32_t *>(z1);
const uint32_t * GF256_RESTRICT x4 = reinterpret_cast<const uint32_t *>(x1);
const uint32_t * GF256_RESTRICT y4 = reinterpret_cast<const uint32_t *>(y1);
*z4 ^= *x4 ^ *y4;
x1 += 4;
y1 += 4;
z1 += 4;
bytes -= 4;
}
// Handle final bytes
for (int i = bytes; i > 0; i--) {
z1[i-1] ^= x1[i-1] ^ y1[i-1];
}
}
void gf256_ctx::gf256_addset_mem(void * GF256_RESTRICT vz, const void * GF256_RESTRICT vx, const void * GF256_RESTRICT vy, int bytes)
{
GF256_M128 * GF256_RESTRICT z16 = reinterpret_cast<GF256_M128 *>(vz);
const GF256_M128 * GF256_RESTRICT x16 = reinterpret_cast<const GF256_M128 *>(vx);
const GF256_M128 * GF256_RESTRICT y16 = reinterpret_cast<const GF256_M128 *>(vy);
// Handle multiples of 64 bytes
while (bytes >= 64)
{
GF256_M128 x0 = _mm_loadu_si128(x16);
GF256_M128 x1 = _mm_loadu_si128(x16 + 1);
GF256_M128 x2 = _mm_loadu_si128(x16 + 2);
GF256_M128 x3 = _mm_loadu_si128(x16 + 3);
GF256_M128 y0 = _mm_loadu_si128(y16);
GF256_M128 y1 = _mm_loadu_si128(y16 + 1);
GF256_M128 y2 = _mm_loadu_si128(y16 + 2);
GF256_M128 y3 = _mm_loadu_si128(y16 + 3);
_mm_storeu_si128(z16, _mm_xor_si128(x0, y0));
_mm_storeu_si128(z16 + 1, _mm_xor_si128(x1, y1));
_mm_storeu_si128(z16 + 2, _mm_xor_si128(x2, y2));
_mm_storeu_si128(z16 + 3, _mm_xor_si128(x3, y3));
x16 += 4;
y16 += 4;
z16 += 4;
bytes -= 64;
}
// Handle multiples of 16 bytes
while (bytes >= 16)
{
// z[i] = x[i] xor y[i]
_mm_storeu_si128(z16,
_mm_xor_si128(
_mm_loadu_si128(x16),
_mm_loadu_si128(y16)));
x16++;
y16++;
z16++;
bytes -= 16;
}
uint8_t * GF256_RESTRICT z1 = reinterpret_cast<uint8_t *>(z16);
const uint8_t * GF256_RESTRICT x1 = reinterpret_cast<const uint8_t *>(x16);
const uint8_t * GF256_RESTRICT y1 = reinterpret_cast<const uint8_t *>(y16);
// Handle a block of 8 bytes
if (bytes >= 8)
{
uint64_t * GF256_RESTRICT z8 = reinterpret_cast<uint64_t *>(z1);
const uint64_t * GF256_RESTRICT x8 = reinterpret_cast<const uint64_t *>(x1);
const uint64_t * GF256_RESTRICT y8 = reinterpret_cast<const uint64_t *>(y1);
*z8 = *x8 ^ *y8;
x1 += 8;
y1 += 8;
z1 += 8;
bytes -= 8;
}
// Handle a block of 4 bytes
if (bytes >= 4)
{
uint32_t * GF256_RESTRICT z4 = reinterpret_cast<uint32_t *>(z1);
const uint32_t * GF256_RESTRICT x4 = reinterpret_cast<const uint32_t *>(x1);
const uint32_t * GF256_RESTRICT y4 = reinterpret_cast<const uint32_t *>(y1);
*z4 = *x4 ^ *y4;
x1 += 4;
y1 += 4;
z1 += 4;
bytes -= 4;
}
// Handle final bytes
for (int i = bytes; i > 0; i--) {
z1[i-1] = x1[i-1] ^ y1[i-1];
}
}
void gf256_ctx::gf256_memswap(void * GF256_RESTRICT vx, void * GF256_RESTRICT vy, int bytes)
{
GF256_M128 * GF256_RESTRICT x16 = reinterpret_cast<GF256_M128 *>(vx);
GF256_M128 * GF256_RESTRICT y16 = reinterpret_cast<GF256_M128 *>(vy);
// Handle blocks of 16 bytes
while (bytes >= 16)
{
GF256_M128 x0 = _mm_loadu_si128(x16);
GF256_M128 y0 = _mm_loadu_si128(y16);
_mm_storeu_si128(x16, y0);
_mm_storeu_si128(y16, x0);
bytes -= 16;
++x16;
++y16;
}
uint8_t * GF256_RESTRICT x1 = reinterpret_cast<uint8_t *>(x16);
uint8_t * GF256_RESTRICT y1 = reinterpret_cast<uint8_t *>(y16);
// Handle a block of 8 bytes
if (bytes >= 8)
{
uint64_t * GF256_RESTRICT x8 = reinterpret_cast<uint64_t *>(x1);
uint64_t * GF256_RESTRICT y8 = reinterpret_cast<uint64_t *>(y1);
uint64_t temp = *x8;
*x8 = *y8;
*y8 = temp;
x1 += 8;
y1 += 8;
bytes -= 8;
}
// Handle a block of 4 bytes
if (bytes >= 4)
{
uint32_t * GF256_RESTRICT x4 = reinterpret_cast<uint32_t *>(x1);
uint32_t * GF256_RESTRICT y4 = reinterpret_cast<uint32_t *>(y1);
uint32_t temp = *x4;
*x4 = *y4;
*y4 = temp;
x1 += 4;
y1 += 4;
bytes -= 4;
}
// Handle final bytes
uint8_t temp;
for (int i = bytes; i > 0; i--) {
temp = x1[i-1]; x1[i-1] = y1[i-1]; y1[i-1] = temp;
}
}
cm256cc-1.1.0/gf256.h 0000664 0000000 0000000 00000020231 14010511322 0013640 0 ustar 00root root 0000000 0000000 /*
Copyright (c) 2015 Christopher A. Taylor. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of CM256 nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GF256_H
#define GF256_H
#include <stdint.h> // uint32_t etc
#include <string.h> // memcpy, memset
#include "export.h"
// TBD: Fix the polynomial at one value and use precomputed tables here to
// simplify the API for GF256.h version 2. Avoids user data alignment issues.
//-----------------------------------------------------------------------------
// Platform-Specific Definitions
//
// Edit these to port to your architecture
#if defined(USE_SSSE3)
#ifdef _MSC_VER
// Compiler-specific 128-bit SIMD register keyword
#define GF256_M128 __m128i
// Compiler-specific C++11 restrict keyword
#define GF256_RESTRICT_KW __restrict
// Compiler-specific force inline keyword
#define GF256_FORCE_INLINE __forceinline
// Compiler-specific alignment keyword
#define GF256_ALIGNED __declspec(align(16))
// Compiler-specific SSE headers
#include <tmmintrin.h> // SSSE3: _mm_shuffle_epi8
#include <emmintrin.h> // SSE2
#else
// Compiler-specific 128-bit SIMD register keyword
#define GF256_M128 __m128i
// Compiler-specific C++11 restrict keyword
#define GF256_RESTRICT_KW __restrict__
// Compiler-specific force inline keyword
#define GF256_FORCE_INLINE __attribute__((always_inline)) inline
// Compiler-specific alignment keyword
#define GF256_ALIGNED __attribute__((aligned(16)))
// Compiler-specific SSE headers
#include <x86intrin.h>
#endif
#elif defined(USE_NEON)
#include "sse2neon.h"
// Compiler-specific 128-bit SIMD register keyword
#define GF256_M128 __m128i
// Compiler-specific C++11 restrict keyword
#define GF256_RESTRICT_KW __restrict__
// Compiler-specific force inline keyword
#define GF256_FORCE_INLINE __attribute__((always_inline)) inline
// Compiler-specific alignment keyword
#define GF256_ALIGNED __attribute__((aligned(16)))
#endif
#if defined(NO_RESTRICT)
#define GF256_RESTRICT
#else
#define GF256_RESTRICT GF256_RESTRICT_KW
#endif
//-----------------------------------------------------------------------------
// GF(256) Context
//
// The context object stores tables required to perform library calculations.
//
// Usage Notes:
// This struct should be aligned in memory, meaning that a pointer to it should
// have the low 4 bits cleared. To achieve this simply tag the gf256_ctx object
// with the GF256_ALIGNED macro provided above.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4324) // warning C4324: 'gf256_ctx' : structure was padded due to __declspec(align())
#endif
class CM256CC_API gf256_ctx // 141,072 bytes
{
public:
gf256_ctx();
~gf256_ctx();
bool isInitialized() const { return initialized; }
/** Performs "x[] += y[]" bulk memory XOR operation */
static void gf256_add_mem(void * GF256_RESTRICT vx, const void * GF256_RESTRICT vy, int bytes);
/** Performs "z[] += x[] + y[]" bulk memory operation */
static void gf256_add2_mem(void * GF256_RESTRICT vz, const void * GF256_RESTRICT vx, const void * GF256_RESTRICT vy, int bytes);
/** Performs "z[] = x[] + y[]" bulk memory operation */
static void gf256_addset_mem(void * GF256_RESTRICT vz, const void * GF256_RESTRICT vx, const void * GF256_RESTRICT vy, int bytes);
/** Swap two memory buffers in-place */
static void gf256_memswap(void * GF256_RESTRICT vx, void * GF256_RESTRICT vy, int bytes);
// return x + y
static GF256_FORCE_INLINE uint8_t gf256_add(const uint8_t x, const uint8_t y)
{
return x ^ y;
}
// return x * y
// For repeated multiplication by a constant, it is faster to put the constant in y.
GF256_FORCE_INLINE uint8_t gf256_mul(uint8_t x, uint8_t y)
{
return GF256_MUL_TABLE[((unsigned)y << 8) + x];
}
// return x / y
// Memory-access optimized for constant divisors in y.
GF256_FORCE_INLINE uint8_t gf256_div(uint8_t x, uint8_t y)
{
return GF256_DIV_TABLE[((unsigned)y << 8) + x];
}
// return 1 / x
GF256_FORCE_INLINE uint8_t gf256_inv(uint8_t x)
{
return GF256_INV_TABLE[x];
}
// This function generates each matrix element based on x_i, x_0, y_j
// Note that for x_i == x_0, this will return 1, so it is better to unroll out the first row.
GF256_FORCE_INLINE unsigned char getMatrixElement(const unsigned char x_i, const unsigned char x_0, const unsigned char y_j)
{
return gf256_div(gf256_add(y_j, x_0), gf256_add(x_i, y_j));
}
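// Illustration (added note): GF(256) addition is XOR, so choosing x_i equal to
// x_0 makes the numerator and denominator identical and every element of that
// row equal to 1 -- this is the all-ones parity row that the encoder and
// decoder special-case for speed.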
/** Performs "z[] = x[] * y" bulk memory operation */
void gf256_mul_mem(void * GF256_RESTRICT vz, const void * GF256_RESTRICT vx, uint8_t y, int bytes);
/** Performs "z[] += x[] * y" bulk memory operation */
void gf256_muladd_mem(void * GF256_RESTRICT vz, uint8_t y, const void * GF256_RESTRICT vx, int bytes);
/** Performs "x[] /= y" bulk memory operation */
GF256_FORCE_INLINE void gf256_div_mem(void * GF256_RESTRICT vz,
const void * GF256_RESTRICT vx, uint8_t y, int bytes)
{
gf256_mul_mem(vz, vx, GF256_INV_TABLE[y], bytes); // Multiply by inverse
}
// Polynomial used
unsigned Polynomial;
// Log/Exp tables
uint16_t GF256_LOG_TABLE[256];
uint8_t GF256_EXP_TABLE[512 * 2 + 1];
// Mul/Div/Inv tables
uint8_t GF256_MUL_TABLE[256 * 256];
uint8_t GF256_DIV_TABLE[256 * 256];
uint8_t GF256_INV_TABLE[256];
// Muladd_mem tables
// We require memory to be aligned since the SIMD instructions benefit from
// aligned accesses to the MM256_* table data.
GF256_ALIGNED GF256_M128 MM256_TABLE_LO_Y[256];
GF256_ALIGNED GF256_M128 MM256_TABLE_HI_Y[256];
private:
int gf256_init_();
void gf255_poly_init(int polynomialIndex); //!< Select which polynomial to use
void gf256_explog_init(); //!< Construct EXP and LOG tables from polynomial
void gf256_muldiv_init(); //!< Initialize MUL and DIV tables using LOG and EXP tables
void gf256_inv_init(); //!< Initialize INV table using DIV table
void gf256_muladd_mem_init(); //!< Initialize the MM256 tables using gf256_mul()
static bool IsLittleEndian()
{
int x = 1;
char *y = (char *) &x;
return *y != 0;
}
//-----------------------------------------------------------------------------
// Generator Polynomial
// There are only 16 irreducible polynomials for GF(256)
static const int GF256_GEN_POLY_COUNT = 16;
static const uint8_t GF256_GEN_POLY[GF256_GEN_POLY_COUNT];
static const int DefaultPolynomialIndex = 3;
bool initialized;
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif // GF256_H
cm256cc-1.1.0/libcm256cc.pc.in 0000664 0000000 0000000 00000000521 14010511322 0015420 0 ustar 00root root 0000000 0000000 prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
libdir=${exec_prefix}/@CMAKE_INSTALL_LIBDIR@
includedir=${prefix}/include
Name: cm256cc library
Description: Fast GF(256) Cauchy MDS Block Erasure Codec in C++
Version: @VERSION@
Cflags: -I${includedir}/ @CM256CC_PC_CFLAGS@
Libs: -L${libdir} -lcm256cc
Libs.private: @CM256CC_PC_LIBS@
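# Example (illustrative, added): consumers typically resolve build flags with
#   pkg-config --cflags --libs libcm256cc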
cm256cc-1.1.0/sse2neon.h 0000664 0000000 0000000 00000120512 14010511322 0014546 0 ustar 00root root 0000000 0000000 #ifndef SSE2NEON_H_
#define SSE2NEON_H_
#ifndef SSE2NEON_H
#define SSE2NEON_H
// This header file provides a simple API translation layer
// between SSE intrinsics to their corresponding ARM NEON versions
//
// This header file does not (yet) translate *all* of the SSE intrinsics.
// Since this is in support of a specific porting effort, I have only
// included the intrinsics I needed to get my port to work.
//
// Questions/Comments/Feedback send to: jratcliffscarab@gmail.com
//
// If you want to improve or add to this project, send me an
// email and I will probably approve your access to the depot.
//
// Project is located here:
//
// https://github.com/jratcliff63367/sse2neon
//
// Show your appreciation for open source by sending me a bitcoin tip to the following
// address.
//
// TipJar: 1PzgWDSyq4pmdAXRH8SPUtta4SWGrt4B1p :
// https://blockchain.info/address/1PzgWDSyq4pmdAXRH8SPUtta4SWGrt4B1p
//
//
// Contributors to this project are:
//
// John W. Ratcliff : jratcliffscarab@gmail.com
// Brandon Rowlett : browlett@nvidia.com
// Ken Fast : kfast@gdeb.com
//
//
/*
** The MIT license:
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and associated documentation files (the "Software"), to deal
** in the Software without restriction, including without limitation the rights
** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
** copies of the Software, and to permit persons to whom the Software is furnished
** to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in all
** copies or substantial portions of the Software.
** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#define GCC 1
#define ENABLE_CPP_VERSION 0
#if GCC
#define FORCE_INLINE inline __attribute__((always_inline))
#else
#define FORCE_INLINE inline
#endif
#include "arm_neon.h"
/*******************************************************/
/* MACRO for shuffle parameter for _mm_shuffle_ps(). */
/* Argument fp3 is a digit[0123] that represents the fp*/
/* from argument "b" of mm_shuffle_ps that will be */
/* placed in fp3 of result. fp2 is the same for fp2 in */
/* result. fp1 is a digit[0123] that represents the fp */
/* from argument "a" of mm_shuffle_ps that will be */
/* places in fp1 of result. fp0 is the same for fp0 of */
/* result */
/*******************************************************/
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) (((fp3) << 6) | ((fp2) << 4) | \
((fp1) << 2) | ((fp0)))
typedef float32x4_t __m128;
typedef int32x4_t __m128i;
// ******************************************
// Set/get methods
// ******************************************
// Sets the 128-bit value to zero https://msdn.microsoft.com/en-us/library/vstudio/ys7dw0kh(v=vs.100).aspx
FORCE_INLINE __m128i _mm_setzero_si128()
{
return vdupq_n_s32(0);
}
// Clears the four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/tk1t2tbz(v=vs.100).aspx
FORCE_INLINE __m128 _mm_setzero_ps(void)
{
return vdupq_n_f32(0);
}
// Sets the four single-precision, floating-point values to w. https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set1_ps(float _w)
{
return vdupq_n_f32(_w);
}
// Sets the four single-precision, floating-point values to w. https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set_ps1(float _w)
{
return vdupq_n_f32(_w);
}
// Sets the four single-precision, floating-point values to the four inputs. https://msdn.microsoft.com/en-us/library/vstudio/afh0zf75(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x)
{
float __attribute__((aligned(16))) data[4] = { x, y, z, w };
return vld1q_f32(data);
}
// Sets the four single-precision, floating-point values to the four inputs in reverse order. https://msdn.microsoft.com/en-us/library/vstudio/d2172ct3(v=vs.100).aspx
FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x)
{
float __attribute__((aligned(16))) data[4] = { w, z, y, x };
return vld1q_f32(data);
}
// Sets the 4 signed 32-bit integer values to i. https://msdn.microsoft.com/en-us/library/vstudio/h4xscxat(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set1_epi32(int _i)
{
return vdupq_n_s32(_i);
}
// Sets the 4 signed 32-bit integer values. https://msdn.microsoft.com/en-us/library/vstudio/019beekt(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
{
int32_t __attribute__((aligned(16))) data[4] = { i0, i1, i2, i3 };
return vld1q_s32(data);
}
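// Illustrative note (added): the _mm_set_* argument order is highest lane
// first, so memory order is the reverse of the argument list. A minimal
// sketch:
//
//   __m128  v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); // lanes: v[0]=1 ... v[3]=4
//   __m128i w = _mm_set_epi32(40, 30, 20, 10);      // lanes: w[0]=10 ... w[3]=40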
// Stores four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/s3h4ay6y(v=vs.100).aspx
FORCE_INLINE void _mm_store_ps(float *p, __m128 a)
{
vst1q_f32(p, a);
}
// Stores four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/44e30x22(v=vs.100).aspx
FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a)
{
vst1q_f32(p, a);
}
// Stores four 32-bit integer values as (as a __m128i value) at the address p. https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a)
{
vst1q_s32((int32_t*) p, a);
}
// Stores the lower single-precision, floating-point value. https://msdn.microsoft.com/en-us/library/tzz10fbx(v=vs.100).aspx
FORCE_INLINE void _mm_store_ss(float *p, __m128 a)
{
vst1q_lane_f32(p, a, 0);
}
// Reads the lower 64 bits of b and stores them into the lower 64 bits of a. https://msdn.microsoft.com/en-us/library/hhwf428f%28v=vs.90%29.aspx
FORCE_INLINE void _mm_storel_epi64(__m128i* a, __m128i b)
{
// reinterpret through 64-bit lanes rather than C-casting a vector to a scalar int64_t
int64x1_t lo = vget_low_s64(vreinterpretq_s64_s32(b));
*a = vreinterpretq_s32_s64(vsetq_lane_s64(vget_lane_s64(lo, 0), vreinterpretq_s64_s32(*a), 0));
}
// Loads a single single-precision, floating-point value, copying it into all four words https://msdn.microsoft.com/en-us/library/vstudio/5cdkf716(v=vs.100).aspx
FORCE_INLINE __m128 _mm_load1_ps(const float * p)
{
return vld1q_dup_f32(p);
}
// Loads four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/zzd50xxt(v=vs.100).aspx
FORCE_INLINE __m128 _mm_load_ps(const float * p)
{
return vld1q_f32(p);
}
// Loads four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/x1b16s7z%28v=vs.90%29.aspx
FORCE_INLINE __m128 _mm_loadu_ps(const float * p)
{
// for NEON, alignment doesn't matter, so _mm_load_ps and _mm_loadu_ps are equivalent
return vld1q_f32(p);
}
// Loads a single-precision, floating-point value into the low word and clears the upper three words. https://msdn.microsoft.com/en-us/library/548bb9h4%28v=vs.90%29.aspx
FORCE_INLINE __m128 _mm_load_ss(const float * p)
{
__m128 result = vdupq_n_f32(0);
return vsetq_lane_f32(*p, result, 0);
}
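// Illustrative round-trip sketch (added, not part of the original sse2neon
// header; assumes a 16-byte-aligned source buffer):
#if 0
static inline void load_store_demo(void)
{
    float __attribute__((aligned(16))) in[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
    float out[4];
    __m128 v = _mm_load_ps(in);   // aligned load, lanes { 1, 2, 3, 4 }
    _mm_storeu_ps(out, v);        // unaligned store; out == { 1, 2, 3, 4 }
}
#endif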
// ******************************************
// Logic/Binary operations
// ******************************************
// Compares for inequality. https://msdn.microsoft.com/en-us/library/sf44thbx(v=vs.100).aspx
FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b)
{
return (__m128)vmvnq_s32((__m128i)vceqq_f32(a, b));
}
// Computes the bitwise AND-NOT of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/68h7wd02(v=vs.100).aspx
FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b)
{
return (__m128)vbicq_s32((__m128i)b, (__m128i)a); // *NOTE* argument swap
}
// Computes the bitwise AND of the 128-bit value in b and the bitwise NOT of the 128-bit value in a. https://msdn.microsoft.com/en-us/library/vstudio/1beaceh8(v=vs.100).aspx
FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
{
return (__m128i)vbicq_s32(b, a); // *NOTE* argument swap
}
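// Illustrative note (added): despite the argument order, both andnot variants
// above compute (~a) & b, matching the SSE semantics; the arguments are
// swapped only because NEON's vbicq computes (first & ~second). E.g. with
// per-lane bits a = 0b1100 and b = 0b1010, each result lane is 0b0010.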
// Computes the bitwise AND of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/vstudio/6d1txsa8(v=vs.100).aspx
FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
{
return (__m128i)vandq_s32(a, b);
}
// Computes the bitwise AND of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/73ck1xc5(v=vs.100).aspx
FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b)
{
return (__m128)vandq_s32((__m128i)a, (__m128i)b);
}
// Computes the bitwise OR of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/7ctdsyy0(v=vs.100).aspx
FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b)
{
return (__m128)vorrq_s32((__m128i)a, (__m128i)b);
}
// Computes bitwise EXOR (exclusive-or) of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/ss6k3wk8(v=vs.100).aspx
FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b)
{
return (__m128)veorq_s32((__m128i)a, (__m128i)b);
}
// Computes the bitwise OR of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/vstudio/ew8ty0db(v=vs.100).aspx
FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
{
return (__m128i)vorrq_s32(a, b);
}
// Computes the bitwise XOR of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/fzt08www(v=vs.100).aspx
FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
{
return veorq_s32(a, b);
}
// NEON does not provide this method
// Creates a 4-bit mask from the most significant bits of the four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/4490ys29(v=vs.100).aspx
FORCE_INLINE int _mm_movemask_ps(__m128 a)
{
#if ENABLE_CPP_VERSION // I am not yet convinced that the NEON version is faster than the C version of this
uint32x4_t &ia = *(uint32x4_t *)&a;
return (ia[0] >> 31) | ((ia[1] >> 30) & 2) | ((ia[2] >> 29) & 4) | ((ia[3] >> 28) & 8);
#else
static const uint32x4_t movemask = { 1, 2, 4, 8 };
static const uint32x4_t highbit = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
uint32x4_t t0 = vreinterpretq_u32_f32(a);
uint32x4_t t1 = vtstq_u32(t0, highbit);
uint32x4_t t2 = vandq_u32(t1, movemask);
uint32x2_t t3 = vorr_u32(vget_low_u32(t2), vget_high_u32(t2));
return vget_lane_u32(t3, 0) | vget_lane_u32(t3, 1);
#endif
}
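// Illustrative sketch (added, not part of the original sse2neon header):
// collects the four lane sign bits into bits 0..3 of the result.
#if 0
static inline int movemask_demo(void)
{
    __m128 v = _mm_set_ps(-4.0f, 3.0f, -2.0f, 1.0f); // lanes { 1, -2, 3, -4 }
    return _mm_movemask_ps(v);                       // lanes 1 and 3 negative -> 0b1010 == 10
}
#endif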
// Takes the upper 64 bits of a and places it in the low end of the result
// Takes the lower 64 bits of b and places it into the high end of the result.
FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
{
return vcombine_f32(vget_high_f32(a), vget_low_f32(b));
}
// takes the lower two 32-bit values from a, swaps them and places them in the low end of the result
// takes the higher two 32-bit values from b, swaps them and places them in the high end of the result.
FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
{
return vcombine_f32(vrev64_f32(vget_low_f32(a)), vrev64_f32(vget_high_f32(b)));
}
// keeps the low 64 bits of a in the low half and puts the high 64 bits of b in the high half
FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
{
return vcombine_f32(vget_low_f32(a), vget_high_f32(b));
}
FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
{
return vcombine_f32(vdup_n_f32(vgetq_lane_f32(a, 1)), vdup_n_f32(vgetq_lane_f32(b, 0)));
}
FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
{
return vcombine_f32(vdup_n_f32(vgetq_lane_f32(a, 2)), vdup_n_f32(vgetq_lane_f32(b, 0)));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
{
return vcombine_f32(vdup_n_f32(vgetq_lane_f32(a, 0)), vdup_n_f32(vgetq_lane_f32(b, 2)));
}
FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
{
float32_t a0 = vgetq_lane_f32(a, 0);
float32_t a2 = vgetq_lane_f32(a, 2);
float32x2_t aVal = vdup_n_f32(a2);
aVal = vset_lane_f32(a0, aVal, 1);
return vcombine_f32(aVal, vget_high_f32(b));
}
FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
{
return vcombine_f32(vdup_n_f32(vgetq_lane_f32(a, 3)), vdup_n_f32(vgetq_lane_f32(b, 1)));
}
FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
{
float32_t b0 = vgetq_lane_f32(b, 0);
float32_t b2 = vgetq_lane_f32(b, 2);
float32x2_t bVal = vdup_n_f32(b0);
bVal = vset_lane_f32(b2, bVal, 1);
return vcombine_f32(vget_low_f32(a), bVal);
}
FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
{
float32_t b0 = vgetq_lane_f32(b, 0);
float32_t b2 = vgetq_lane_f32(b, 2);
float32x2_t bVal = vdup_n_f32(b0);
bVal = vset_lane_f32(b2, bVal, 1);
return vcombine_f32(vrev64_f32(vget_low_f32(a)), bVal);
}
FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
{
float32_t b0 = vgetq_lane_f32(b, 0);
float32_t b2 = vgetq_lane_f32(b, 2);
float32x2_t bVal = vdup_n_f32(b0);
bVal = vset_lane_f32(b2, bVal, 1);
return vcombine_f32(vget_high_f32(a), bVal);
}
// NEON does not support a general purpose permute intrinsic
// Currently I am not sure whether the C implementation is faster or slower than the NEON version.
// Note, this has to be expanded as a template because the shuffle value must be an immediate value.
// The same is true on SSE as well.
// Selects four specific single-precision, floating-point values from a and b, based on the mask i. https://msdn.microsoft.com/en-us/library/vstudio/5f0858x0(v=vs.100).aspx
template <int i>
FORCE_INLINE __m128 _mm_shuffle_ps_default(__m128 a, __m128 b)
{
#if ENABLE_CPP_VERSION // I am not convinced that the NEON version is faster than the C version yet.
__m128 ret;
ret[0] = a[i & 0x3];
ret[1] = a[(i >> 2) & 0x3];
ret[2] = b[(i >> 4) & 0x03];
ret[3] = b[(i >> 6) & 0x03];
return ret;
#else
__m128 ret = vmovq_n_f32(vgetq_lane_f32(a, i & 0x3));
ret = vsetq_lane_f32(vgetq_lane_f32(a, (i >> 2) & 0x3), ret, 1);
ret = vsetq_lane_f32(vgetq_lane_f32(b, (i >> 4) & 0x3), ret, 2);
ret = vsetq_lane_f32(vgetq_lane_f32(b, (i >> 6) & 0x3), ret, 3);
return ret;
#endif
}
template <int i>
FORCE_INLINE __m128 _mm_shuffle_ps_function(__m128 a, __m128 b)
{
switch (i)
{
case _MM_SHUFFLE(1, 0, 3, 2): return _mm_shuffle_ps_1032(a, b); break;
case _MM_SHUFFLE(2, 3, 0, 1): return _mm_shuffle_ps_2301(a, b); break;
case _MM_SHUFFLE(3, 2, 1, 0): return _mm_shuffle_ps_3210(a, b); break;
case _MM_SHUFFLE(0, 0, 1, 1): return _mm_shuffle_ps_0011(a, b); break;
case _MM_SHUFFLE(0, 0, 2, 2): return _mm_shuffle_ps_0022(a, b); break;
case _MM_SHUFFLE(2, 2, 0, 0): return _mm_shuffle_ps_2200(a, b); break;
case _MM_SHUFFLE(3, 2, 0, 2): return _mm_shuffle_ps_3202(a, b); break;
case _MM_SHUFFLE(1, 1, 3, 3): return _mm_shuffle_ps_1133(a, b); break;
case _MM_SHUFFLE(2, 0, 1, 0): return _mm_shuffle_ps_2010(a, b); break;
case _MM_SHUFFLE(2, 0, 0, 1): return _mm_shuffle_ps_2001(a, b); break;
case _MM_SHUFFLE(2, 0, 3, 2): return _mm_shuffle_ps_2032(a, b); break;
default: return _mm_shuffle_ps_default<i>(a, b);
}
}
#define _mm_shuffle_ps(a,b,i) _mm_shuffle_ps_function<i>(a,b)
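// Illustrative usage (added): reversing the lanes of a single vector. This
// immediate is not one of the specialized cases above, so it resolves to the
// default lane-by-lane path:
//
//   __m128 r = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 1, 2, 3)); // r = { v3, v2, v1, v0 }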
// Takes the upper 64 bits of a and places it in the low end of the result
// Takes the lower 64 bits of b and places it into the high end of the result.
FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a, __m128i b)
{
return vcombine_s32(vget_high_s32(a), vget_low_s32(b));
}
// takes the lower two 32-bit values from a, swaps them and places them in the low end of the result
// takes the higher two 32-bit values from b, swaps them and places them in the high end of the result.
FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a, __m128i b)
{
return vcombine_s32(vrev64_s32(vget_low_s32(a)), vrev64_s32(vget_high_s32(b)));
}
// shifts a right by 32 bits and puts the lowest 32 bits of b into the uppermost 32 bits of the result
// when a and b are the same, this rotates the least significant 32 bits into the most significant 32 bits, and shifts the rest down
FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a, __m128i b)
{
return vextq_s32(a, b, 1);
}
// shifts b left by 32 bits and puts the uppermost 32 bits of a into the lowest 32 bits of the result
// when a and b are the same, this rotates the most significant 32 bits into the least significant 32 bits, and shifts the rest up
FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a, __m128i b)
{
return vextq_s32(a, b, 3);
}
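// Illustrative note (added): with a == b the two vextq_s32 shuffles above act
// as full-register lane rotations, e.g.
//
//   _mm_shuffle_epi_0321(v, v); // { v0, v1, v2, v3 } -> { v1, v2, v3, v0 }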
// gets the lower 64 bits of a and places them in the lower 64 bits of the result
// gets the lower 64 bits of b and places them in the upper 64 bits of the result
FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a, __m128i b)
{
return vcombine_s32(vget_low_s32(a), vget_low_s32(b));
}
// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places them in the lower 64 bits of the result
// gets the lower 64 bits of b and places them in the upper 64 bits of the result
FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a, __m128i b)
{
return vcombine_s32(vrev64_s32(vget_low_s32(a)), vget_low_s32(b));
}
// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places them in the lower 64 bits of the result
// gets the lower 64 bits of b, swaps the 0 and 1 elements, and places them in the upper 64 bits of the result
FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a, __m128i b)
{
return vcombine_s32(vrev64_s32(vget_low_s32(a)), vrev64_s32(vget_low_s32(b)));
}
FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a, __m128i b)
{
return vcombine_s32(vdup_n_s32(vgetq_lane_s32(a, 1)), vdup_n_s32(vgetq_lane_s32(b, 2)));
}
FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a, __m128i b)
{
return vcombine_s32(vdup_n_s32(vgetq_lane_s32(a, 2)), vrev64_s32(vget_low_s32(b)));
}
FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a, __m128i b)
{
return vcombine_s32(vget_high_s32(a), vdup_n_s32(vgetq_lane_s32(b, 3)));
}
template <int i>
FORCE_INLINE __m128i _mm_shuffle_epi32_default(__m128i a, __m128i b)
{
#if ENABLE_CPP_VERSION
__m128i ret;
ret[0] = a[i & 0x3];
ret[1] = a[(i >> 2) & 0x3];
ret[2] = b[(i >> 4) & 0x03];
ret[3] = b[(i >> 6) & 0x03];
return ret;
#else
__m128i ret = vmovq_n_s32(vgetq_lane_s32(a, i & 0x3));
ret = vsetq_lane_s32(vgetq_lane_s32(a, (i >> 2) & 0x3), ret, 1);
ret = vsetq_lane_s32(vgetq_lane_s32(b, (i >> 4) & 0x3), ret, 2);
ret = vsetq_lane_s32(vgetq_lane_s32(b, (i >> 6) & 0x3), ret, 3);
return ret;
#endif
}
template <int i>
FORCE_INLINE __m128i _mm_shuffle_epi32_function(__m128i a, __m128i b)
{
switch (i)
{
case _MM_SHUFFLE(1, 0, 3, 2): return _mm_shuffle_epi_1032(a, b); break;
case _MM_SHUFFLE(2, 3, 0, 1): return _mm_shuffle_epi_2301(a, b); break;
case _MM_SHUFFLE(0, 3, 2, 1): return _mm_shuffle_epi_0321(a, b); break;
case _MM_SHUFFLE(2, 1, 0, 3): return _mm_shuffle_epi_2103(a, b); break;
case _MM_SHUFFLE(1, 0, 1, 0): return _mm_shuffle_epi_1010(a, b); break;
case _MM_SHUFFLE(1, 0, 0, 1): return _mm_shuffle_epi_1001(a, b); break;
case _MM_SHUFFLE(0, 1, 0, 1): return _mm_shuffle_epi_0101(a, b); break;
case _MM_SHUFFLE(2, 2, 1, 1): return _mm_shuffle_epi_2211(a, b); break;
case _MM_SHUFFLE(0, 1, 2, 2): return _mm_shuffle_epi_0122(a, b); break;
case _MM_SHUFFLE(3, 3, 3, 2): return _mm_shuffle_epi_3332(a, b); break;
default: return _mm_shuffle_epi32_default<i>(a, b);
}
}
template <int i>
FORCE_INLINE __m128i _mm_shuffle_epi32_splat(__m128i a)
{
return vdupq_n_s32(vgetq_lane_s32(a, i));
}
template <int i>
FORCE_INLINE __m128i _mm_shuffle_epi32_single(__m128i a)
{
switch (i)
{
case _MM_SHUFFLE(0, 0, 0, 0): return _mm_shuffle_epi32_splat<0>(a); break;
case _MM_SHUFFLE(1, 1, 1, 1): return _mm_shuffle_epi32_splat<1>(a); break;
case _MM_SHUFFLE(2, 2, 2, 2): return _mm_shuffle_epi32_splat<2>(a); break;
case _MM_SHUFFLE(3, 3, 3, 3): return _mm_shuffle_epi32_splat<3>(a); break;
default: return _mm_shuffle_epi32_function<i>(a, a);
}
}
// Shuffles the 4 signed or unsigned 32-bit integers in a as specified by imm. https://msdn.microsoft.com/en-us/library/56f67xbk%28v=vs.90%29.aspx
#define _mm_shuffle_epi32(a,i) _mm_shuffle_epi32_single<i>(a)
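// Illustrative usage (added): broadcasting lane 0, which resolves at compile
// time to the vdupq_n_s32 splat path above:
//
//   __m128i s = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0)); // s = { v0, v0, v0, v0 }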
template <int i>
FORCE_INLINE __m128i _mm_shufflehi_epi16_function(__m128i a)
{
int16x8_t ret = (int16x8_t)a;
int16x4_t highBits = vget_high_s16(ret);
ret = vsetq_lane_s16(vget_lane_s16(highBits, i & 0x3), ret, 4);
ret = vsetq_lane_s16(vget_lane_s16(highBits, (i >> 2) & 0x3), ret, 5);
ret = vsetq_lane_s16(vget_lane_s16(highBits, (i >> 4) & 0x3), ret, 6);
ret = vsetq_lane_s16(vget_lane_s16(highBits, (i >> 6) & 0x3), ret, 7);
return (__m128i)ret;
}
// Shuffles the upper 4 signed or unsigned 16-bit integers in a as specified by imm. https://msdn.microsoft.com/en-us/library/13ywktbs(v=vs.100).aspx
#define _mm_shufflehi_epi16(a,i) _mm_shufflehi_epi16_function<i>(a)
// Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while shifting in zeros. https://msdn.microsoft.com/en-us/library/z2k3bbtb%28v=vs.90%29.aspx
#define _mm_slli_epi32(a, imm) (__m128i)vshlq_n_s32(a,imm)
// Shifts the 4 signed or unsigned 32-bit integers in a right by count bits while shifting in zeros. https://msdn.microsoft.com/en-us/library/w486zcfa(v=vs.100).aspx
#define _mm_srli_epi32( a, imm ) (__m128i)vshrq_n_u32((uint32x4_t)a, imm)
// Shifts the 4 signed 32-bit integers in a right by count bits while shifting in the sign bit. https://msdn.microsoft.com/en-us/library/z1939387(v=vs.100).aspx
#define _mm_srai_epi32( a, imm ) vshrq_n_s32(a, imm)
// Shifts the 128-bit value in a right by imm bytes while shifting in zeros. imm must be an immediate. https://msdn.microsoft.com/en-us/library/305w28yz(v=vs.100).aspx
//#define _mm_srli_si128( a, imm ) (__m128i)vmaxq_s8((int8x16_t)a, vextq_s8((int8x16_t)a, vdupq_n_s8(0), imm))
#define _mm_srli_si128( a, imm ) (__m128i)vextq_s8((int8x16_t)a, vdupq_n_s8(0), (imm))
// Shifts the 128-bit value in a left by imm bytes while shifting in zeros. imm must be an immediate. https://msdn.microsoft.com/en-us/library/34d3k2kt(v=vs.100).aspx
#define _mm_slli_si128( a, imm ) (__m128i)vextq_s8(vdupq_n_s8(0), (int8x16_t)a, 16 - (imm))
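// Illustrative note (added): these shift the whole 128-bit register by bytes,
// not per lane. E.g. _mm_srli_si128(a, 4) drops the lowest four bytes, so for
// 32-bit lanes { a0, a1, a2, a3 } it yields { a1, a2, a3, 0 }.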
// NEON does not provide a version of this function; here is an article about some ways to reproduce the result.
// http://stackoverflow.com/questions/11870910/sse-mm-movemask-epi8-equivalent-method-for-arm-neon
// Creates a 16-bit mask from the most significant bits of the 16 signed or unsigned 8-bit integers in a and zero extends the upper bits. https://msdn.microsoft.com/en-us/library/vstudio/s090c8fk(v=vs.100).aspx
FORCE_INLINE int _mm_movemask_epi8(__m128i _a)
{
uint8x16_t input = (uint8x16_t)_a;
const int8_t __attribute__((aligned(16))) xr[8] = { -7, -6, -5, -4, -3, -2, -1, 0 };
uint8x8_t mask_and = vdup_n_u8(0x80);
int8x8_t mask_shift = vld1_s8(xr);
uint8x8_t lo = vget_low_u8(input);
uint8x8_t hi = vget_high_u8(input);
lo = vand_u8(lo, mask_and);
lo = vshl_u8(lo, mask_shift);
hi = vand_u8(hi, mask_and);
hi = vshl_u8(hi, mask_shift);
lo = vpadd_u8(lo, lo);
lo = vpadd_u8(lo, lo);
lo = vpadd_u8(lo, lo);
hi = vpadd_u8(hi, hi);
hi = vpadd_u8(hi, hi);
hi = vpadd_u8(hi, hi);
return ((hi[0] << 8) | (lo[0] & 0xFF));
}
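// Illustrative trace (added): in the code above, each byte's MSB (0x80) is
// shifted right by (7 - k) for byte index k, leaving bit k set; three rounds
// of pairwise addition then accumulate the eight distinct bits of each half
// into byte 0, and the two halves are recombined as (hi << 8) | lo.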
// ******************************************
// Math operations
// ******************************************
// Subtracts the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/1zad2k61(v=vs.100).aspx
FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b)
{
return vsubq_f32(a, b);
}
// Subtracts the 4 signed or unsigned 32-bit integers of b from the 4 signed or unsigned 32-bit integers of a. https://msdn.microsoft.com/en-us/library/vstudio/fhh866h0(v=vs.100).aspx
FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b)
{
return vsubq_s32(a, b);
}
// Adds the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/c9848chc(v=vs.100).aspx
FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b)
{
return vaddq_f32(a, b);
}
// Adds the 4 signed or unsigned 32-bit integers in a to the 4 signed or unsigned 32-bit integers in b. https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx
FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b)
{
return vaddq_s32(a, b);
}
// Adds the 8 signed or unsigned 16-bit integers in a to the 8 signed or unsigned 16-bit integers in b. https://msdn.microsoft.com/en-us/library/fceha5k4(v=vs.100).aspx
FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b)
{
return (__m128i)vaddq_s16((int16x8_t)a, (int16x8_t)b);
}
// Multiplies the 8 signed or unsigned 16-bit integers from a by the 8 signed or unsigned 16-bit integers from b. https://msdn.microsoft.com/en-us/library/vstudio/9ks1472s(v=vs.100).aspx
FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b)
{
return (__m128i)vmulq_s16((int16x8_t)a, (int16x8_t)b);
}
// Multiplies the 4 signed or unsigned 32-bit integers from a by the 4 signed or unsigned 32-bit integers from b. https://msdn.microsoft.com/en-us/library/vstudio/bb531409(v=vs.100).aspx
FORCE_INLINE __m128i _mm_mullo_epi32 (__m128i a, __m128i b)
{
return (__m128i)vmulq_s32((int32x4_t)a,(int32x4_t)b);
}
// Multiplies the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/22kbk6t9(v=vs.100).aspx
FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b)
{
return vmulq_f32(a, b);
}
// This version does additional Newton-Raphson iterations to improve accuracy; between 1 and 4 iterations are recommended.
// Computes the approximations of reciprocals of the four single-precision, floating-point values of a. https://msdn.microsoft.com/en-us/library/vstudio/796k1tty(v=vs.100).aspx
FORCE_INLINE __m128 recipq_newton(__m128 in, int n)
{
__m128 recip = vrecpeq_f32(in);
for (int i = 0; i < n; ++i)